diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy index fa3db32e6a0c8..399c4838d2b14 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy @@ -69,7 +69,7 @@ class ClusterConfiguration { */ @Input Closure minimumMasterNodes = { - if (bwcVersion != null && bwcVersion.before("6.5.0-SNAPSHOT")) { + if (bwcVersion != null && bwcVersion.before("6.5.0")) { return numNodes > 1 ? numNodes : -1 } else { return numNodes > 1 ? numNodes.intdiv(2) + 1 : -1 diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index 8c7791c133e47..28c212235f371 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -127,7 +127,7 @@ class ClusterFormationTasks { nodes.add(node) Closure writeConfigSetup Object dependsOn - if (node.nodeVersion.onOrAfter("6.5.0-SNAPSHOT")) { + if (node.nodeVersion.onOrAfter("6.5.0")) { writeConfigSetup = { Map esConfig -> // Don't force discovery provider if one is set by the test cluster specs already if (esConfig.containsKey('discovery.zen.hosts_provider') == false) { @@ -140,7 +140,7 @@ class ClusterFormationTasks { } else { dependsOn = startTasks.empty ? startDependencies : startTasks.get(0) writeConfigSetup = { Map esConfig -> - String unicastTransportUri = node.config.unicastTransportUri(nodes.get(0), node, project.ant) + String unicastTransportUri = node.config.unicastTransportUri(nodes.get(0), node, project.createAntBuilder()) if (unicastTransportUri == null) { esConfig['discovery.zen.ping.unicast.hosts'] = [] } else { @@ -717,7 +717,7 @@ class ClusterFormationTasks { Collection unicastHosts = new HashSet<>() nodes.forEach { node -> unicastHosts.addAll(node.config.otherUnicastHostAddresses.call()) - String unicastHost = node.config.unicastTransportUri(node, null, project.ant) + String unicastHost = node.config.unicastTransportUri(node, null, project.createAntBuilder()) if (unicastHost != null) { unicastHosts.add(unicastHost) } @@ -913,9 +913,10 @@ class ClusterFormationTasks { outputPrintStream: outputStream, messageOutputLevel: org.apache.tools.ant.Project.MSG_INFO) - project.ant.project.addBuildListener(listener) - Object retVal = command(project.ant) - project.ant.project.removeBuildListener(listener) + AntBuilder ant = project.createAntBuilder() + ant.project.addBuildListener(listener) + Object retVal = command(ant) + ant.project.removeBuildListener(listener) return retVal } diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index 9c776d419b207..09f0062352d6e 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -45,50 +45,6 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -183,19 +139,6 @@ - - - - - - - - - - - - - diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java index 02bbd89a3ea63..aca9906701150 100644 --- 
a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java @@ -23,7 +23,6 @@ import org.elasticsearch.gradle.test.GradleIntegrationTestCase; import org.gradle.testkit.runner.GradleRunner; import org.junit.BeforeClass; -import org.junit.Ignore; import org.junit.Rule; import org.junit.rules.TemporaryFolder; @@ -39,7 +38,6 @@ import java.util.Objects; import java.util.stream.Collectors; -@Ignore public class BuildExamplePluginsIT extends GradleIntegrationTestCase { private static List EXAMPLE_PLUGINS = Collections.unmodifiableList( diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java index 940eff47253f1..f153919ac06d2 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java @@ -55,6 +55,9 @@ public void testUseClusterByTwo() { } public void testUseClusterByUpToDateTask() { + // Run it once, ignoring the result, and run it again to make sure it's considered up to date. + // Gradle randomly considers tasks without inputs and outputs as up-to-date or successful on the first run. + getTestClustersRunner("upToDate1", "upToDate2").build(); BuildResult result = getTestClustersRunner("upToDate1", "upToDate2").build(); assertTaskUpToDate(result, ":upToDate1", ":upToDate2"); assertNotStarted(result); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleClient.java index d88dfa3c92bbf..bf99c37c81745 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleClient.java @@ -47,8 +47,8 @@ public class IndexLifecycleClient { } /** - * Retrieve one or more lifecycle policy definition - * See + * Retrieve one or more lifecycle policy definitions. See + * * the docs for more. * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized @@ -62,8 +62,8 @@ public GetLifecyclePolicyResponse getLifecyclePolicy(GetLifecyclePolicyRequest r } /** - * Asynchronously retrieve one or more lifecycle policy definition - * See + * Asynchronously retrieve one or more lifecycle policy definitions. See + * * the docs for more. * @param request the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java index 6d7fefe907fc6..4c3090680664d 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java @@ -32,6 +32,7 @@ import org.elasticsearch.client.ml.DeleteDatafeedRequest; import org.elasticsearch.client.ml.DeleteForecastRequest; import org.elasticsearch.client.ml.DeleteJobRequest; +import org.elasticsearch.client.ml.DeleteModelSnapshotRequest; import org.elasticsearch.client.ml.FlushJobRequest; import org.elasticsearch.client.ml.ForecastJobRequest; import org.elasticsearch.client.ml.GetBucketsRequest; @@ -56,6 +57,7 @@ import org.elasticsearch.client.ml.StartDatafeedRequest; import org.elasticsearch.client.ml.StopDatafeedRequest; import org.elasticsearch.client.ml.UpdateDatafeedRequest; +import org.elasticsearch.client.ml.UpdateFilterRequest; import org.elasticsearch.client.ml.UpdateJobRequest; import org.elasticsearch.client.ml.job.util.PageParams; import org.elasticsearch.common.Strings; @@ -334,6 +336,18 @@ static Request deleteForecast(DeleteForecastRequest deleteForecastRequest) { return request; } + static Request deleteModelSnapshot(DeleteModelSnapshotRequest deleteModelSnapshotRequest) { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("anomaly_detectors") + .addPathPart(deleteModelSnapshotRequest.getJobId()) + .addPathPartAsIs("model_snapshots") + .addPathPart(deleteModelSnapshotRequest.getSnapshotId()) + .build(); + return new Request(HttpDelete.METHOD_NAME, endpoint); + } + static Request getBuckets(GetBucketsRequest getBucketsRequest) throws IOException { String endpoint = new EndpointBuilder() .addPathPartAsIs("_xpack") @@ -510,4 +524,17 @@ static Request getFilter(GetFiltersRequest getFiltersRequest) { } return request; } + + static Request updateFilter(UpdateFilterRequest updateFilterRequest) throws IOException { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("filters") + .addPathPart(updateFilterRequest.getFilterId()) + .addPathPartAsIs("_update") + .build(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + request.setEntity(createEntity(updateFilterRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java index e327f9b2a5812..7afc5a462ed21 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java @@ -27,6 +27,7 @@ import org.elasticsearch.client.ml.DeleteForecastRequest; import org.elasticsearch.client.ml.DeleteJobRequest; import org.elasticsearch.client.ml.DeleteJobResponse; +import org.elasticsearch.client.ml.DeleteModelSnapshotRequest; import org.elasticsearch.client.ml.FlushJobRequest; import org.elasticsearch.client.ml.FlushJobResponse; import org.elasticsearch.client.ml.ForecastJobRequest; @@ -74,6 +75,7 @@ import org.elasticsearch.client.ml.StopDatafeedRequest; import 
org.elasticsearch.client.ml.StopDatafeedResponse; import org.elasticsearch.client.ml.UpdateDatafeedRequest; +import org.elasticsearch.client.ml.UpdateFilterRequest; import org.elasticsearch.client.ml.UpdateJobRequest; import org.elasticsearch.client.ml.job.stats.JobStats; @@ -463,6 +465,47 @@ public void deleteForecastAsync(DeleteForecastRequest request, RequestOptions op Collections.emptySet()); } + /** + * Deletes Machine Learning Model Snapshots + *
<p>
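+ * A minimal usage sketch (the job ID, snapshot ID, and the {@code client} instance are
+ * illustrative, not part of this change):
+ * <pre>{@code
+ * DeleteModelSnapshotRequest request = new DeleteModelSnapshotRequest("my-job", "my-snapshot-id");
+ * AcknowledgedResponse response = client.machineLearning().deleteModelSnapshot(request, RequestOptions.DEFAULT);
+ * boolean acknowledged = response.isAcknowledged();
+ * }</pre>
+ *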
+ * For additional info + * see + * ML Delete Model Snapshot documentation + * + * @param request The request to delete the model snapshot + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return action acknowledgement + * @throws IOException when there is a serialization issue sending the request or receiving the response + */ + public AcknowledgedResponse deleteModelSnapshot(DeleteModelSnapshotRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::deleteModelSnapshot, + options, + AcknowledgedResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Deletes Machine Learning Model Snapshots asynchronously and notifies the listener on completion + *
<p>
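+ * A sketch of the asynchronous variant; the IDs and listener bodies are illustrative placeholders:
+ * <pre>{@code
+ * DeleteModelSnapshotRequest request = new DeleteModelSnapshotRequest("my-job", "my-snapshot-id");
+ * client.machineLearning().deleteModelSnapshotAsync(request, RequestOptions.DEFAULT,
+ *     new ActionListener<AcknowledgedResponse>() {
+ *         @Override
+ *         public void onResponse(AcknowledgedResponse response) {
+ *             // snapshot deletion was acknowledged
+ *         }
+ *
+ *         @Override
+ *         public void onFailure(Exception e) {
+ *             // handle the failure
+ *         }
+ *     });
+ * }</pre>
+ *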
+ * For additional info + * see + * ML Delete Model Snapshot documentation + * + * @param request The request to delete the model snapshot + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void deleteModelSnapshotAsync(DeleteModelSnapshotRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::deleteModelSnapshot, + options, + AcknowledgedResponse::fromXContent, + listener, + Collections.emptySet()); + } + /** * Creates a new Machine Learning Datafeed *
<p>
@@ -1288,4 +1331,44 @@ public void getFilterAsync(GetFiltersRequest request, RequestOptions options, Ac listener, Collections.emptySet()); } + + /** + * Updates a Machine Learning Filter + *
<p>
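+ * A minimal usage sketch (the filter ID and items are illustrative):
+ * <pre>{@code
+ * UpdateFilterRequest request = new UpdateFilterRequest("my_filter");
+ * request.setDescription("A list of safe domains");
+ * request.setAddItems(Arrays.asList("*.elastic.co"));
+ * request.setRemoveItems(Arrays.asList("wikipedia.org"));
+ * PutFilterResponse response = client.machineLearning().updateFilter(request, RequestOptions.DEFAULT);
+ * }</pre>
+ *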
+ * For additional info + * see + * ML Update Filter documentation + * + * @param request The request + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return PutFilterResponse with the updated {@link org.elasticsearch.client.ml.job.config.MlFilter} object + * @throws IOException when there is a serialization issue sending the request or receiving the response + */ + public PutFilterResponse updateFilter(UpdateFilterRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::updateFilter, + options, + PutFilterResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Updates a Machine Learning Filter asynchronously and notifies the listener on completion + *
<p>
+ * For additional info + * see + * ML Update Filter documentation + * + * @param request The request + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void updateFilterAsync(UpdateFilterRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::updateFilter, + options, + PutFilterResponse::fromXContent, + listener, + Collections.emptySet()); + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index d448275d35845..7f6b422d866c0 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -50,6 +50,7 @@ import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.client.core.CountRequest; +import org.elasticsearch.client.core.TermVectorsRequest; import org.elasticsearch.client.security.RefreshPolicy; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.Nullable; @@ -78,7 +79,6 @@ import org.elasticsearch.script.mustache.SearchTemplateRequest; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.tasks.TaskId; -import org.elasticsearch.client.core.TermVectorsRequest; import java.io.ByteArrayOutputStream; import java.io.IOException; @@ -264,7 +264,7 @@ private static Request getStyleRequest(String method, GetRequest getRequest) { return request; } - + static Request sourceExists(GetRequest getRequest) { Request request = new Request(HttpHead.METHOD_NAME, endpoint(getRequest.index(), getRequest.type(), getRequest.id(), "_source")); @@ -275,7 +275,7 @@ static Request sourceExists(GetRequest getRequest) { parameters.withRealtime(getRequest.realtime()); // Version params are not currently supported by the source exists API so are not passed return request; - } + } static Request multiGet(MultiGetRequest multiGetRequest) throws IOException { Request request = new Request(HttpPost.METHOD_NAME, "/_mget"); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupClient.java index b26b5d664dd0b..5d507c1680976 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupClient.java @@ -20,8 +20,8 @@ package org.elasticsearch.client; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.core.AcknowledgedResponse; import org.elasticsearch.client.rollup.DeleteRollupJobRequest; -import org.elasticsearch.client.rollup.DeleteRollupJobResponse; import org.elasticsearch.client.rollup.GetRollupIndexCapsRequest; import org.elasticsearch.client.rollup.GetRollupIndexCapsResponse; import org.elasticsearch.client.rollup.GetRollupJobRequest; @@ -31,7 +31,6 @@ import org.elasticsearch.client.rollup.GetRollupJobRequest; import org.elasticsearch.client.rollup.GetRollupJobResponse; import org.elasticsearch.client.rollup.PutRollupJobRequest; -import org.elasticsearch.client.rollup.PutRollupJobResponse; import 
org.elasticsearch.client.rollup.StartRollupJobRequest; import org.elasticsearch.client.rollup.StartRollupJobResponse; import org.elasticsearch.client.rollup.StopRollupJobRequest; @@ -64,11 +63,11 @@ public class RollupClient { * @return the response * @throws IOException in case there is a problem sending the request or parsing back the response */ - public PutRollupJobResponse putRollupJob(PutRollupJobRequest request, RequestOptions options) throws IOException { + public AcknowledgedResponse putRollupJob(PutRollupJobRequest request, RequestOptions options) throws IOException { return restHighLevelClient.performRequestAndParseEntity(request, RollupRequestConverters::putJob, options, - PutRollupJobResponse::fromXContent, + AcknowledgedResponse::fromXContent, Collections.emptySet()); } @@ -80,11 +79,11 @@ public PutRollupJobResponse putRollupJob(PutRollupJobRequest request, RequestOpt * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion */ - public void putRollupJobAsync(PutRollupJobRequest request, RequestOptions options, ActionListener listener) { + public void putRollupJobAsync(PutRollupJobRequest request, RequestOptions options, ActionListener listener) { restHighLevelClient.performRequestAsyncAndParseEntity(request, RollupRequestConverters::putJob, options, - PutRollupJobResponse::fromXContent, + AcknowledgedResponse::fromXContent, listener, Collections.emptySet()); } @@ -165,11 +164,11 @@ public void stopRollupJobAsync(StopRollupJobRequest request, RequestOptions opti * @return the response * @throws IOException in case there is a problem sending the request or parsing back the response */ - public DeleteRollupJobResponse deleteRollupJob(DeleteRollupJobRequest request, RequestOptions options) throws IOException { + public AcknowledgedResponse deleteRollupJob(DeleteRollupJobRequest request, RequestOptions options) throws IOException { return restHighLevelClient.performRequestAndParseEntity(request, RollupRequestConverters::deleteJob, options, - DeleteRollupJobResponse::fromXContent, + AcknowledgedResponse::fromXContent, Collections.emptySet()); } /** @@ -182,11 +181,11 @@ public DeleteRollupJobResponse deleteRollupJob(DeleteRollupJobRequest request, R */ public void deleteRollupJobAsync(DeleteRollupJobRequest request, RequestOptions options, - ActionListener listener) { + ActionListener listener) { restHighLevelClient.performRequestAsyncAndParseEntity(request, RollupRequestConverters::deleteJob, options, - DeleteRollupJobResponse::fromXContent, + AcknowledgedResponse::fromXContent, listener, Collections.emptySet()); } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupRequestConverters.java index 175c74ee76bdb..7fca7c89b541d 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupRequestConverters.java @@ -65,7 +65,14 @@ static Request stopJob(final StopRollupJobRequest stopRollupJobRequest) throws I .addPathPart(stopRollupJobRequest.getJobId()) .addPathPartAsIs("_stop") .build(); - return new Request(HttpPost.METHOD_NAME, endpoint); + + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + RequestConverters.Params parameters = new RequestConverters.Params(request); + 
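+ // The timeout is passed through unconditionally; the Params helper is assumed to skip null
+ // values, whereas wait_for_completion needs the explicit null check so the Boolean is never
+ // auto-unboxed when it was not set.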
parameters.withTimeout(stopRollupJobRequest.timeout()); + if (stopRollupJobRequest.waitForCompletion() != null) { + parameters.withWaitForCompletion(stopRollupJobRequest.waitForCompletion()); + } + return request; } static Request getJob(final GetRollupJobRequest getRollupJobRequest) { diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java index 68bb9b9a28b99..93d29056a707a 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java @@ -42,6 +42,8 @@ import org.elasticsearch.client.security.GetRoleMappingsResponse; import org.elasticsearch.client.security.GetSslCertificatesRequest; import org.elasticsearch.client.security.GetSslCertificatesResponse; +import org.elasticsearch.client.security.HasPrivilegesRequest; +import org.elasticsearch.client.security.HasPrivilegesResponse; import org.elasticsearch.client.security.InvalidateTokenRequest; import org.elasticsearch.client.security.InvalidateTokenResponse; import org.elasticsearch.client.security.PutRoleMappingRequest; @@ -244,6 +246,34 @@ public void authenticateAsync(RequestOptions options, ActionListener + + /** + * Determine whether the current user has a specified list of privileges + * See + * the docs for more. + * + * @param request the request with the privileges to check + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response from the has privileges call + */ + public HasPrivilegesResponse hasPrivileges(HasPrivilegesRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, SecurityRequestConverters::hasPrivileges, options, + HasPrivilegesResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously determine whether the current user has a specified list of privileges + * See + * the docs for more. + * + * @param request the request with the privileges to check + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void hasPrivilegesAsync(HasPrivilegesRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::hasPrivileges, options, + HasPrivilegesResponse::fromXContent, listener, emptySet()); + } + /** * Clears the cache in one or more realms. 
* See diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityRequestConverters.java index 160aa1fd82b0a..216085af78a38 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityRequestConverters.java @@ -30,6 +30,7 @@ import org.elasticsearch.client.security.DeletePrivilegesRequest; import org.elasticsearch.client.security.DeleteRoleMappingRequest; import org.elasticsearch.client.security.DeleteRoleRequest; +import org.elasticsearch.client.security.HasPrivilegesRequest; import org.elasticsearch.client.security.DisableUserRequest; import org.elasticsearch.client.security.EnableUserRequest; import org.elasticsearch.client.security.GetRoleMappingsRequest; @@ -114,6 +115,12 @@ private static Request setUserEnabled(SetUserEnabledRequest setUserEnabledReques return request; } + static Request hasPrivileges(HasPrivilegesRequest hasPrivilegesRequest) throws IOException { + Request request = new Request(HttpGet.METHOD_NAME, "/_xpack/security/user/_has_privileges"); + request.setEntity(createEntity(hasPrivilegesRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + static Request clearRealmCache(ClearRealmCacheRequest clearRealmCacheRequest) { RequestConverters.EndpointBuilder builder = new RequestConverters.EndpointBuilder() .addPathPartAsIs("_xpack/security/realm"); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/AcknowledgedResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/AcknowledgedResponse.java index f46ea88d473d0..8122b30d7cb87 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/AcknowledgedResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/AcknowledgedResponse.java @@ -21,9 +21,6 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.ToXContentObject; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; @@ -32,7 +29,7 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; -public class AcknowledgedResponse implements ToXContentObject { +public class AcknowledgedResponse { protected static final String PARSE_FIELD_NAME = "acknowledged"; private static final ConstructingObjectParser PARSER = AcknowledgedResponse @@ -75,16 +72,6 @@ public int hashCode() { return Objects.hash(acknowledged); } - @Override - public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.startObject(); - { - builder.field(getFieldName(), isAcknowledged()); - } - builder.endObject(); - return builder; - } - /** * @return the field name this response uses to output the acknowledged flag */ diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/CountRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/CountRequest.java index 6d4589c7861f6..4b40fde53e954 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/CountRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/CountRequest.java @@ -90,7 +90,8 @@ 
public CountRequest source(SearchSourceBuilder searchSourceBuilder) { /** * The document types to execute the count against. Defaults to be executed against all types. * - * @deprecated Types are going away, prefer filtering on a type. + * @deprecated Types are in the process of being removed. Instead of using a type, prefer to + * filter on a field on the document. */ @Deprecated public CountRequest types(String... types) { @@ -172,6 +173,11 @@ public CountRequest terminateAfter(int terminateAfter) { return this; } + /** + * @deprecated Types are in the process of being removed. Instead of using a type, prefer to + * filter on a field on the document. + */ + @Deprecated public String[] types() { return Arrays.copyOf(this.types, this.types.length); } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/TermVectorsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/TermVectorsRequest.java index 5c94dfd0a3375..579ab52185198 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/TermVectorsRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/TermVectorsRequest.java @@ -33,6 +33,8 @@ public class TermVectorsRequest implements ToXContentObject, Validatable { private final String index; private final String type; private String id = null; + private XContentBuilder docBuilder = null; + private String routing = null; private String preference = null; private boolean realtime = true; @@ -44,7 +46,6 @@ public class TermVectorsRequest implements ToXContentObject, Validatable { private boolean requestTermStatistics = false; private Map perFieldAnalyzer = null; private Map filterSettings = null; - private XContentBuilder docBuilder = null; /** @@ -54,7 +55,8 @@ public class TermVectorsRequest implements ToXContentObject, Validatable { * @param docId - id of the document */ public TermVectorsRequest(String index, String type, String docId) { - this(index, type); + this.index = index; + this.type = type; this.id = docId; } @@ -62,10 +64,12 @@ public TermVectorsRequest(String index, String type, String docId) { * Constructs TermVectorRequest for an artificial document * @param index - index of the document * @param type - type of the document + * @param docBuilder - an artificial document */ - public TermVectorsRequest(String index, String type) { + public TermVectorsRequest(String index, String type, XContentBuilder docBuilder) { this.index = index; this.type = type; + this.docBuilder = docBuilder; } /** diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/DeleteModelSnapshotRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/DeleteModelSnapshotRequest.java new file mode 100644 index 0000000000000..1c153e3555b15 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/DeleteModelSnapshotRequest.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.client.ml.job.process.ModelSnapshot; + + +import java.util.Objects; + +/** + * Request to delete a Machine Learning Model Snapshot via its Job and Snapshot IDs + */ +public class DeleteModelSnapshotRequest extends ActionRequest { + + private final String jobId; + private final String snapshotId; + + public DeleteModelSnapshotRequest(String jobId, String snapshotId) { + this.jobId = Objects.requireNonNull(jobId, "[" + Job.ID + "] must not be null"); + this.snapshotId = Objects.requireNonNull(snapshotId, "[" + ModelSnapshot.SNAPSHOT_ID + "] must not be null"); + } + + public String getJobId() { + return jobId; + } + + public String getSnapshotId() { + return snapshotId; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public int hashCode() { + return Objects.hash(jobId, snapshotId); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || obj.getClass() != getClass()) { + return false; + } + + DeleteModelSnapshotRequest other = (DeleteModelSnapshotRequest) obj; + return Objects.equals(jobId, other.jobId) && Objects.equals(snapshotId, other.snapshotId); + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/UpdateFilterRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/UpdateFilterRequest.java new file mode 100644 index 0000000000000..fb7d06a552514 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/UpdateFilterRequest.java @@ -0,0 +1,156 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.ml.job.config.MlFilter; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Collection; +import java.util.Objects; +import java.util.SortedSet; +import java.util.TreeSet; + +/** + * Updates an existing {@link MlFilter} configuration + */ +public class UpdateFilterRequest extends ActionRequest implements ToXContentObject { + + public static final ParseField ADD_ITEMS = new ParseField("add_items"); + public static final ParseField REMOVE_ITEMS = new ParseField("remove_items"); + + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("update_filter_request", (a) -> new UpdateFilterRequest((String)a[0])); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), MlFilter.ID); + PARSER.declareStringOrNull(UpdateFilterRequest::setDescription, MlFilter.DESCRIPTION); + PARSER.declareStringArray(UpdateFilterRequest::setAddItems, ADD_ITEMS); + PARSER.declareStringArray(UpdateFilterRequest::setRemoveItems, REMOVE_ITEMS); + } + + private String filterId; + private String description; + private SortedSet addItems; + private SortedSet removeItems; + + /** + * Construct a new request referencing a non-null, existing filter_id + * @param filterId Id referencing the filter to update + */ + public UpdateFilterRequest(String filterId) { + this.filterId = Objects.requireNonNull(filterId, "[" + MlFilter.ID.getPreferredName() + "] must not be null"); + } + + public String getFilterId() { + return filterId; + } + + public String getDescription() { + return description; + } + + /** + * The new description of the filter + * @param description the updated filter description + */ + public void setDescription(String description) { + this.description = description; + } + + public SortedSet getAddItems() { + return addItems; + } + + /** + * The collection of items to add to the filter + * @param addItems non-null items to add to the filter, defaults to empty array + */ + public void setAddItems(Collection addItems) { + this.addItems = new TreeSet<>(Objects.requireNonNull(addItems, + "[" + ADD_ITEMS.getPreferredName()+"] must not be null")); + } + + public SortedSet getRemoveItems() { + return removeItems; + } + + /** + * The collection of items to remove from the filter + * @param removeItems non-null items to remove from the filter, defaults to empty array + */ + public void setRemoveItems(Collection removeItems) { + this.removeItems = new TreeSet<>(Objects.requireNonNull(removeItems, + "[" + REMOVE_ITEMS.getPreferredName()+"] must not be null")); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(MlFilter.ID.getPreferredName(), filterId); + if (description != null) { + builder.field(MlFilter.DESCRIPTION.getPreferredName(), description); + } + if (addItems != null) { + builder.field(ADD_ITEMS.getPreferredName(), addItems); + } + if (removeItems != null) { + builder.field(REMOVE_ITEMS.getPreferredName(), removeItems); + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + 
return Objects.hash(filterId, description, addItems, removeItems); + } + + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + UpdateFilterRequest other = (UpdateFilterRequest) obj; + return Objects.equals(filterId, other.filterId) + && Objects.equals(description, other.description) + && Objects.equals(addItems, other.addItems) + && Objects.equals(removeItems, other.removeItems); + } + + @Override + public final String toString() { + return Strings.toString(this); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedConfig.java index 84deae61f8e62..4b9bc8abf5337 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedConfig.java @@ -62,6 +62,7 @@ public class DatafeedConfig implements ToXContentObject { public static final ParseField AGGREGATIONS = new ParseField("aggregations"); public static final ParseField SCRIPT_FIELDS = new ParseField("script_fields"); public static final ParseField CHUNKING_CONFIG = new ParseField("chunking_config"); + public static final ParseField DELAYED_DATA_CHECK_CONFIG = new ParseField("delayed_data_check_config"); public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "datafeed_config", true, a -> new Builder((String)a[0], (String)a[1])); @@ -88,6 +89,7 @@ public class DatafeedConfig implements ToXContentObject { }, SCRIPT_FIELDS); PARSER.declareInt(Builder::setScrollSize, SCROLL_SIZE); PARSER.declareObject(Builder::setChunkingConfig, ChunkingConfig.PARSER, CHUNKING_CONFIG); + PARSER.declareObject(Builder::setDelayedDataCheckConfig, DelayedDataCheckConfig.PARSER, DELAYED_DATA_CHECK_CONFIG); } private static BytesReference parseBytes(XContentParser parser) throws IOException { @@ -107,10 +109,12 @@ private static BytesReference parseBytes(XContentParser parser) throws IOExcepti private final List scriptFields; private final Integer scrollSize; private final ChunkingConfig chunkingConfig; + private final DelayedDataCheckConfig delayedDataCheckConfig; + private DatafeedConfig(String id, String jobId, TimeValue queryDelay, TimeValue frequency, List indices, List types, BytesReference query, BytesReference aggregations, List scriptFields, - Integer scrollSize, ChunkingConfig chunkingConfig) { + Integer scrollSize, ChunkingConfig chunkingConfig, DelayedDataCheckConfig delayedDataCheckConfig) { this.id = id; this.jobId = jobId; this.queryDelay = queryDelay; @@ -122,6 +126,7 @@ private DatafeedConfig(String id, String jobId, TimeValue queryDelay, TimeValue this.scriptFields = scriptFields == null ? 
null : Collections.unmodifiableList(scriptFields); this.scrollSize = scrollSize; this.chunkingConfig = chunkingConfig; + this.delayedDataCheckConfig = delayedDataCheckConfig; } public String getId() { @@ -168,6 +173,10 @@ public ChunkingConfig getChunkingConfig() { return chunkingConfig; } + public DelayedDataCheckConfig getDelayedDataCheckConfig() { + return delayedDataCheckConfig; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -204,6 +213,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (chunkingConfig != null) { builder.field(CHUNKING_CONFIG.getPreferredName(), chunkingConfig); } + if (delayedDataCheckConfig != null) { + builder.field(DELAYED_DATA_CHECK_CONFIG.getPreferredName(), delayedDataCheckConfig); + } builder.endObject(); return builder; @@ -244,7 +256,8 @@ public boolean equals(Object other) { && Objects.equals(this.scrollSize, that.scrollSize) && Objects.equals(asMap(this.aggregations), asMap(that.aggregations)) && Objects.equals(this.scriptFields, that.scriptFields) - && Objects.equals(this.chunkingConfig, that.chunkingConfig); + && Objects.equals(this.chunkingConfig, that.chunkingConfig) + && Objects.equals(this.delayedDataCheckConfig, that.delayedDataCheckConfig); } /** @@ -255,7 +268,7 @@ public boolean equals(Object other) { @Override public int hashCode() { return Objects.hash(id, jobId, frequency, queryDelay, indices, types, asMap(query), scrollSize, asMap(aggregations), scriptFields, - chunkingConfig); + chunkingConfig, delayedDataCheckConfig); } public static Builder builder(String id, String jobId) { @@ -275,6 +288,7 @@ public static class Builder { private List scriptFields; private Integer scrollSize; private ChunkingConfig chunkingConfig; + private DelayedDataCheckConfig delayedDataCheckConfig; public Builder(String id, String jobId) { this.id = Objects.requireNonNull(id, ID.getPreferredName()); @@ -293,6 +307,7 @@ public Builder(DatafeedConfig config) { this.scriptFields = config.scriptFields; this.scrollSize = config.scrollSize; this.chunkingConfig = config.chunkingConfig; + this.delayedDataCheckConfig = config.getDelayedDataCheckConfig(); } public Builder setIndices(List indices) { @@ -366,9 +381,23 @@ public Builder setChunkingConfig(ChunkingConfig chunkingConfig) { return this; } + /** + * This sets the {@link DelayedDataCheckConfig} settings. + * + * See {@link DelayedDataCheckConfig} for more information. + * + * @param delayedDataCheckConfig the delayed data check configuration + * Default value is enabled, with `check_window` being null. This means the true window is + * calculated when the real-time Datafeed runs. 
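+ *
+ * A minimal sketch enabling the check with an explicit one-hour window (the datafeed and job
+ * IDs are illustrative):
+ * <pre>{@code
+ * DatafeedConfig.Builder builder = DatafeedConfig.builder("my-datafeed", "my-job");
+ * builder.setDelayedDataCheckConfig(DelayedDataCheckConfig.enabledDelayedDataCheckConfig(TimeValue.timeValueHours(1)));
+ * }</pre>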
+ */ + public Builder setDelayedDataCheckConfig(DelayedDataCheckConfig delayedDataCheckConfig) { + this.delayedDataCheckConfig = delayedDataCheckConfig; + return this; + } + public DatafeedConfig build() { return new DatafeedConfig(id, jobId, queryDelay, frequency, indices, types, query, aggregations, scriptFields, scrollSize, - chunkingConfig); + chunkingConfig, delayedDataCheckConfig); } private static BytesReference xContentToBytes(ToXContentObject object) throws IOException { diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdate.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdate.java index 119f70fc79756..5daacdd9a0588 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdate.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdate.java @@ -77,6 +77,9 @@ public class DatafeedUpdate implements ToXContentObject { }, DatafeedConfig.SCRIPT_FIELDS); PARSER.declareInt(Builder::setScrollSize, DatafeedConfig.SCROLL_SIZE); PARSER.declareObject(Builder::setChunkingConfig, ChunkingConfig.PARSER, DatafeedConfig.CHUNKING_CONFIG); + PARSER.declareObject(Builder::setDelayedDataCheckConfig, + DelayedDataCheckConfig.PARSER, + DatafeedConfig.DELAYED_DATA_CHECK_CONFIG); } private static BytesReference parseBytes(XContentParser parser) throws IOException { @@ -96,10 +99,11 @@ private static BytesReference parseBytes(XContentParser parser) throws IOExcepti private final List scriptFields; private final Integer scrollSize; private final ChunkingConfig chunkingConfig; + private final DelayedDataCheckConfig delayedDataCheckConfig; private DatafeedUpdate(String id, String jobId, TimeValue queryDelay, TimeValue frequency, List indices, List types, BytesReference query, BytesReference aggregations, List scriptFields, - Integer scrollSize, ChunkingConfig chunkingConfig) { + Integer scrollSize, ChunkingConfig chunkingConfig, DelayedDataCheckConfig delayedDataCheckConfig) { this.id = id; this.jobId = jobId; this.queryDelay = queryDelay; @@ -111,6 +115,7 @@ private DatafeedUpdate(String id, String jobId, TimeValue queryDelay, TimeValue this.scriptFields = scriptFields; this.scrollSize = scrollSize; this.chunkingConfig = chunkingConfig; + this.delayedDataCheckConfig = delayedDataCheckConfig; } /** @@ -146,6 +151,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } builder.endObject(); } + if (delayedDataCheckConfig != null) { + builder.field(DatafeedConfig.DELAYED_DATA_CHECK_CONFIG.getPreferredName(), delayedDataCheckConfig); + } addOptionalField(builder, DatafeedConfig.SCROLL_SIZE, scrollSize); addOptionalField(builder, DatafeedConfig.CHUNKING_CONFIG, chunkingConfig); builder.endObject(); @@ -198,6 +206,10 @@ public ChunkingConfig getChunkingConfig() { return chunkingConfig; } + public DelayedDataCheckConfig getDelayedDataCheckConfig() { + return delayedDataCheckConfig; + } + private static Map asMap(BytesReference bytesReference) { return bytesReference == null ? 
null : XContentHelper.convertToMap(bytesReference, true, XContentType.JSON).v2(); } @@ -232,6 +244,7 @@ public boolean equals(Object other) { && Objects.equals(asMap(this.query), asMap(that.query)) && Objects.equals(this.scrollSize, that.scrollSize) && Objects.equals(asMap(this.aggregations), asMap(that.aggregations)) + && Objects.equals(this.delayedDataCheckConfig, that.delayedDataCheckConfig) && Objects.equals(this.scriptFields, that.scriptFields) && Objects.equals(this.chunkingConfig, that.chunkingConfig); } @@ -244,7 +257,7 @@ public boolean equals(Object other) { @Override public int hashCode() { return Objects.hash(id, jobId, frequency, queryDelay, indices, types, asMap(query), scrollSize, asMap(aggregations), scriptFields, - chunkingConfig); + chunkingConfig, delayedDataCheckConfig); } public static Builder builder(String id) { @@ -264,6 +277,7 @@ public static class Builder { private List scriptFields; private Integer scrollSize; private ChunkingConfig chunkingConfig; + private DelayedDataCheckConfig delayedDataCheckConfig; public Builder(String id) { this.id = Objects.requireNonNull(id, DatafeedConfig.ID.getPreferredName()); @@ -281,6 +295,7 @@ public Builder(DatafeedUpdate config) { this.scriptFields = config.scriptFields; this.scrollSize = config.scrollSize; this.chunkingConfig = config.chunkingConfig; + this.delayedDataCheckConfig = config.delayedDataCheckConfig; } public Builder setJobId(String jobId) { @@ -359,9 +374,14 @@ public Builder setChunkingConfig(ChunkingConfig chunkingConfig) { return this; } + public Builder setDelayedDataCheckConfig(DelayedDataCheckConfig delayedDataCheckConfig) { + this.delayedDataCheckConfig = delayedDataCheckConfig; + return this; + } + public DatafeedUpdate build() { return new DatafeedUpdate(id, jobId, queryDelay, frequency, indices, types, query, aggregations, scriptFields, scrollSize, - chunkingConfig); + chunkingConfig, delayedDataCheckConfig); } private static BytesReference xContentToBytes(ToXContentObject object) throws IOException { diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DelayedDataCheckConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DelayedDataCheckConfig.java new file mode 100644 index 0000000000000..43dd2c9a5a958 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DelayedDataCheckConfig.java @@ -0,0 +1,130 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml.datafeed; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +/** + * The configuration object containing the delayed data check settings. + * + * See {@link DelayedDataCheckConfig#enabledDelayedDataCheckConfig(TimeValue)} for creating a new + * enabled data check with the given check_window. + * + * See {@link DelayedDataCheckConfig#disabledDelayedDataCheckConfig()} for creating a config for disabling + * delayed data checking. + */ +public class DelayedDataCheckConfig implements ToXContentObject { + + public static final ParseField ENABLED = new ParseField("enabled"); + public static final ParseField CHECK_WINDOW = new ParseField("check_window"); + + // These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "delayed_data_check_config", true, a -> new DelayedDataCheckConfig((Boolean) a[0], (TimeValue) a[1])); + static { + PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), ENABLED); + PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), p -> { + if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return TimeValue.parseTimeValue(p.text(), CHECK_WINDOW.getPreferredName()); + } + throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); + }, CHECK_WINDOW, ObjectParser.ValueType.STRING); + } + + /** + * This creates a new DelayedDataCheckConfig that has a check_window of the passed `timeValue`. + * + * We query the index, over the window ending at the latest finalized bucket, looking for any data that was + * indexed after the Datafeed read it. + * + * The window must be larger than the {@link org.elasticsearch.client.ml.job.config.AnalysisConfig#bucketSpan}, less than + * 24 hours, and span less than 10,000 buckets. + * + * @param timeValue The time length in the past from the latest finalized bucket to look for latent data. + * If `null` is provided, the appropriate window is calculated when it is used. + **/ + public static DelayedDataCheckConfig enabledDelayedDataCheckConfig(TimeValue timeValue) { + return new DelayedDataCheckConfig(true, timeValue); + } + + /** + * This creates a new DelayedDataCheckConfig that disables the data check. 
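+ *
+ * For example, disabling the check through a datafeed update (the datafeed ID is illustrative):
+ * <pre>{@code
+ * DatafeedUpdate.Builder update = DatafeedUpdate.builder("my-datafeed");
+ * update.setDelayedDataCheckConfig(DelayedDataCheckConfig.disabledDelayedDataCheckConfig());
+ * }</pre>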
+ */ + public static DelayedDataCheckConfig disabledDelayedDataCheckConfig() { + return new DelayedDataCheckConfig(false, null); + } + + private final boolean enabled; + private final TimeValue checkWindow; + + DelayedDataCheckConfig(Boolean enabled, TimeValue checkWindow) { + this.enabled = enabled; + this.checkWindow = checkWindow; + } + + public boolean isEnabled() { + return enabled; + } + + @Nullable + public TimeValue getCheckWindow() { + return checkWindow; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(ENABLED.getPreferredName(), enabled); + if (checkWindow != null) { + builder.field(CHECK_WINDOW.getPreferredName(), checkWindow.getStringRep()); + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(enabled, checkWindow); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + DelayedDataCheckConfig other = (DelayedDataCheckConfig) obj; + return Objects.equals(this.enabled, other.enabled) && Objects.equals(this.checkWindow, other.checkWindow); + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/DeleteRollupJobResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/DeleteRollupJobResponse.java deleted file mode 100644 index a4f2cd45a2a26..0000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/DeleteRollupJobResponse.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.client.rollup; - -import org.elasticsearch.client.core.AcknowledgedResponse; -import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.XContentParser; - -import java.io.IOException; - -public class DeleteRollupJobResponse extends AcknowledgedResponse { - - public DeleteRollupJobResponse(boolean acknowledged) { - super(acknowledged); - } - - private static final ConstructingObjectParser PARSER = AcknowledgedResponse - .generateParser("delete_rollup_job_response", DeleteRollupJobResponse::new, AcknowledgedResponse.PARSE_FIELD_NAME); - - public static DeleteRollupJobResponse fromXContent(final XContentParser parser) throws IOException { - return PARSER.parse(parser, null); - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/PutRollupJobResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/PutRollupJobResponse.java deleted file mode 100644 index 6a93f364c68e6..0000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/PutRollupJobResponse.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.client.rollup; - -import org.elasticsearch.client.core.AcknowledgedResponse; -import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.XContentParser; - -import java.io.IOException; - -public class PutRollupJobResponse extends AcknowledgedResponse { - - public PutRollupJobResponse(boolean acknowledged) { - super(acknowledged); - } - - private static final ConstructingObjectParser PARSER = AcknowledgedResponse - .generateParser("delete_rollup_job_response", PutRollupJobResponse::new, AcknowledgedResponse.PARSE_FIELD_NAME); - - public static PutRollupJobResponse fromXContent(final XContentParser parser) throws IOException { - return PARSER.parse(parser, null); - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/StopRollupJobRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/StopRollupJobRequest.java index 948dc5deac2a5..05c8836e2bf19 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/StopRollupJobRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/StopRollupJobRequest.java @@ -19,12 +19,15 @@ package org.elasticsearch.client.rollup; import org.elasticsearch.client.Validatable; +import org.elasticsearch.common.unit.TimeValue; import java.util.Objects; public class StopRollupJobRequest implements Validatable { private final String jobId; + private TimeValue timeout; + private Boolean waitForCompletion; public StopRollupJobRequest(final String jobId) { this.jobId = Objects.requireNonNull(jobId, "id parameter must not be null"); @@ -46,4 +49,26 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(jobId); } + + /** + * Sets the request's optional "timeout" parameter. + */ + public void timeout(TimeValue timeout) { + this.timeout = timeout; + } + + public TimeValue timeout() { + return this.timeout; + } + + /** + * Sets the request's optional "wait_for_completion" parameter. + */ + public void waitForCompletion(boolean waitForCompletion) { + this.waitForCompletion = waitForCompletion; + } + + public Boolean waitForCompletion() { + return this.waitForCompletion; + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/HasPrivilegesRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/HasPrivilegesRequest.java new file mode 100644 index 0000000000000..0e47c81d6eaa0 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/HasPrivilegesRequest.java @@ -0,0 +1,96 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/HasPrivilegesRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/HasPrivilegesRequest.java new file mode 100644 index 0000000000000..0e47c81d6eaa0 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/HasPrivilegesRequest.java @@ -0,0 +1,96 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.client.Validatable; +import org.elasticsearch.client.security.user.privileges.ApplicationResourcePrivileges; +import org.elasticsearch.client.security.user.privileges.IndicesPrivileges; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; +import java.util.Set; + +import static java.util.Collections.emptySet; +import static java.util.Collections.unmodifiableSet; + +/** + * Request to determine whether the current user has a list of privileges. + */ +public final class HasPrivilegesRequest implements Validatable, ToXContentObject { + + private final Set clusterPrivileges; + private final Set indexPrivileges; + private final Set applicationPrivileges; + + public HasPrivilegesRequest(@Nullable Set clusterPrivileges, + @Nullable Set indexPrivileges, + @Nullable Set applicationPrivileges) { + this.clusterPrivileges = clusterPrivileges == null ? emptySet() : unmodifiableSet(clusterPrivileges); + this.indexPrivileges = indexPrivileges == null ? emptySet() : unmodifiableSet(indexPrivileges); + this.applicationPrivileges = applicationPrivileges == null ? emptySet() : unmodifiableSet(applicationPrivileges); + + if (this.clusterPrivileges.isEmpty() && this.indexPrivileges.isEmpty() && this.applicationPrivileges.isEmpty()) { + throw new IllegalArgumentException("At least 1 privilege must be specified"); + } + } + + public Set getClusterPrivileges() { + return clusterPrivileges; + } + + public Set getIndexPrivileges() { + return indexPrivileges; + } + + public Set getApplicationPrivileges() { + return applicationPrivileges; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .field("cluster", clusterPrivileges) + .field("index", indexPrivileges) + .field("application", applicationPrivileges) + .endObject(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final HasPrivilegesRequest that = (HasPrivilegesRequest) o; + return Objects.equals(clusterPrivileges, that.clusterPrivileges) && + Objects.equals(indexPrivileges, that.indexPrivileges) && + Objects.equals(applicationPrivileges, that.applicationPrivileges); + } + + @Override + public int hashCode() { + return Objects.hash(clusterPrivileges, indexPrivileges, applicationPrivileges); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/HasPrivilegesResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/HasPrivilegesResponse.java new file mode 100644 index 0000000000000..41ba3a4bcb038 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/HasPrivilegesResponse.java @@ -0,0 +1,252 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.function.BiConsumer; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * Response when checking whether the current user has a defined set of privileges. + */ +public final class HasPrivilegesResponse { + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "has_privileges_response", true, args -> new HasPrivilegesResponse( + (String) args[0], (Boolean) args[1], checkMap(args[2], 0), checkMap(args[3], 1), checkMap(args[4], 2))); + + static { + PARSER.declareString(constructorArg(), new ParseField("username")); + PARSER.declareBoolean(constructorArg(), new ParseField("has_all_requested")); + declareMap(constructorArg(), "cluster"); + declareMap(constructorArg(), "index"); + declareMap(constructorArg(), "application"); + } + + @SuppressWarnings("unchecked") + private static Map checkMap(Object argument, int depth) { + if (argument instanceof Map) { + Map map = (Map) argument; + if (depth == 0) { + map.values().stream() + .filter(val -> (val instanceof Boolean) == false) + .forEach(val -> { + throw new IllegalArgumentException("Map value [" + val + "] in [" + map + "] is not a Boolean"); + }); + } else { + map.values().stream().forEach(val -> checkMap(val, depth - 1)); + } + return map; + } + throw new IllegalArgumentException("Value [" + argument + "] is not an Object"); + } + + private static void declareMap(BiConsumer> arg, String name) { + PARSER.declareField(arg, XContentParser::map, new ParseField(name), ObjectParser.ValueType.OBJECT); + } + + private final String username; + private final boolean hasAllRequested; + private final Map clusterPrivileges; + private final Map> indexPrivileges; + private final Map>> applicationPrivileges; + + public HasPrivilegesResponse(String username, boolean hasAllRequested, + Map clusterPrivileges, + Map> indexPrivileges, + Map>> applicationPrivileges) { + this.username = username; + this.hasAllRequested = hasAllRequested; + this.clusterPrivileges = Collections.unmodifiableMap(clusterPrivileges); + this.indexPrivileges = unmodifiableMap2(indexPrivileges); + this.applicationPrivileges = unmodifiableMap3(applicationPrivileges); + } + + private static Map> unmodifiableMap2(final Map> map) { + final Map> copy = new HashMap<>(map); + copy.replaceAll((k, v) -> Collections.unmodifiableMap(v)); + return Collections.unmodifiableMap(copy); + } + + private static Map>> unmodifiableMap3( + final Map>> map) { + final Map>> copy = new HashMap<>(map); + copy.replaceAll((k, v) -> unmodifiableMap2(v)); + return Collections.unmodifiableMap(copy); + } + + public static HasPrivilegesResponse fromXContent(XContentParser parser) throws 
IOException { + return PARSER.parse(parser, null); + } + + /** + * The username (principal) of the user for which the privileges check was executed. + */ + public String getUsername() { + return username; + } + + /** + * {@code true} if the user has every privilege that was checked. Otherwise {@code false}. + */ + public boolean hasAllRequested() { + return hasAllRequested; + } + + /** + * @param clusterPrivilegeName The name of a cluster privilege. This privilege must have been specified (verbatim) in the + * {@link HasPrivilegesRequest#getClusterPrivileges() cluster privileges of the request}. + * @return {@code true} if the user has the specified cluster privilege. {@code false} if the privilege was checked + * but it has not been granted to the user. + * @throws IllegalArgumentException if the response did not include a value for the specified privilege name. + * The response only includes values for privileges that were + * {@link HasPrivilegesRequest#getClusterPrivileges() included in the request}. + */ + public boolean hasClusterPrivilege(String clusterPrivilegeName) throws IllegalArgumentException { + Boolean has = clusterPrivileges.get(clusterPrivilegeName); + if (has == null) { + throw new IllegalArgumentException("Cluster privilege [" + clusterPrivilegeName + "] was not included in this response"); + } + return has; + } + + /** + * @param indexName The name of the index to check. This index must have been specified (verbatim) in the + * {@link HasPrivilegesRequest#getIndexPrivileges() requested index privileges}. + * @param privilegeName The name of the index privilege to check. This privilege must have been specified (verbatim), for the + * given index, in the {@link HasPrivilegesRequest#getIndexPrivileges() requested index privileges}. + * @return {@code true} if the user has the specified privilege on the specified index. {@code false} if the privilege was checked + * for that index and was not granted to the user. + * @throws IllegalArgumentException if the response did not include a value for the specified index and privilege name pair. + * The response only includes values for indices and privileges that were + * {@link HasPrivilegesRequest#getIndexPrivileges() included in the request}. + */ + public boolean hasIndexPrivilege(String indexName, String privilegeName) { + Map indexPrivileges = this.indexPrivileges.get(indexName); + if (indexPrivileges == null) { + throw new IllegalArgumentException("No privileges for index [" + indexName + "] were included in this response"); + } + Boolean has = indexPrivileges.get(privilegeName); + if (has == null) { + throw new IllegalArgumentException("Privilege [" + privilegeName + "] was not included in the response for index [" + + indexName + "]"); + } + return has; + } + + /** + * @param applicationName The name of the application to check. This application must have been specified (verbatim) in the + * {@link HasPrivilegesRequest#getApplicationPrivileges() requested application privileges}. + * @param resourceName The name of the resource to check. This resource must have been specified (verbatim), for the given + * application in the {@link HasPrivilegesRequest#getApplicationPrivileges() requested application privileges}. + * @param privilegeName The name of the privilege to check. This privilege must have been specified (verbatim), for the given + * application and resource, in the + * {@link HasPrivilegesRequest#getApplicationPrivileges() requested application privileges}. 
+ * @return {@code true} if the user has the specified privilege on the specified resource in the specified application. + * {@code false} if the privilege was checked for that application and resource, but was not granted to the user. + * @throws IllegalArgumentException if the response did not include a value for the specified application, resource and privilege + * triplet. The response only includes values for applications, resources and privileges that were + * {@link HasPrivilegesRequest#getApplicationPrivileges() included in the request}. + */ + public boolean hasApplicationPrivilege(String applicationName, String resourceName, String privilegeName) { + final Map> appPrivileges = this.applicationPrivileges.get(applicationName); + if (appPrivileges == null) { + throw new IllegalArgumentException("No privileges for application [" + applicationName + "] were included in this response"); + } + final Map resourcePrivileges = appPrivileges.get(resourceName); + if (resourcePrivileges == null) { + throw new IllegalArgumentException("No privileges for resource [" + resourceName + + "] were included in the response for application [" + applicationName + "]"); + } + Boolean has = resourcePrivileges.get(privilegeName); + if (has == null) { + throw new IllegalArgumentException("Privilege [" + privilegeName + "] was not included in the response for application [" + + applicationName + "] and resource [" + resourceName + "]"); + } + return has; + } + + /** + * A {@code Map} from cluster-privilege-name to access. Each requested privilege is included as a key in the map, and the + * associated value indicates whether the user was granted that privilege. + *
+ * The {@link #hasClusterPrivilege} method should be used in preference to directly accessing this map. + *
+ */ + public Map getClusterPrivileges() { + return clusterPrivileges; + } + + /** + * A {@code Map} from index-name + privilege-name to access. Each requested index is a key in the outer map. + * Each requested privilege is a key in the inner map. The innermost {@code Boolean} value indicates whether + * the user was granted that privilege on that index. + *
+ * The {@link #hasIndexPrivilege} method should be used in preference to directly accessing this map. + *
+ */ + public Map> getIndexPrivileges() { + return indexPrivileges; + } + + /** + * A {@code Map} from application-name + resource-name + privilege-name to access. Each requested application is a key in the + * outermost map. Each requested resource is a key in the next-level map. The requested privileges form the keys in the innermost map. + * The {@code Boolean} value indicates whether the user was granted that privilege on that resource within that application. + *
+ * The {@link #hasApplicationPrivilege} method should be used in preference to directly accessing this map. + *
+ */ + public Map>> getApplicationPrivileges() { + return applicationPrivileges; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || this.getClass() != o.getClass()) { + return false; + } + final HasPrivilegesResponse that = (HasPrivilegesResponse) o; + return this.hasAllRequested == that.hasAllRequested && + Objects.equals(this.username, that.username) && + Objects.equals(this.clusterPrivileges, that.clusterPrivileges) && + Objects.equals(this.indexPrivileges, that.indexPrivileges) && + Objects.equals(this.applicationPrivileges, that.applicationPrivileges); + } + + @Override + public int hashCode() { + return Objects.hash(username, hasAllRequested, clusterPrivileges, indexPrivileges, applicationPrivileges); + } +} +
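Taken together, the two new security classes above give the high-level client a user privilege check. A minimal sketch of the round trip; the SecurityClient#hasPrivileges entry point and the client variable are assumptions here, since the entry point is not shown in this excerpt:

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    import org.elasticsearch.client.RequestOptions;
    import org.elasticsearch.client.security.HasPrivilegesRequest;
    import org.elasticsearch.client.security.HasPrivilegesResponse;

    // At least one of the three privilege sets must be non-empty, otherwise the
    // constructor throws IllegalArgumentException. Index and application privileges
    // (IndicesPrivileges / ApplicationResourcePrivileges) are passed as null here,
    // which the request normalizes to empty sets.
    Set<String> clusterPrivileges = new HashSet<>(Arrays.asList("monitor", "manage"));
    HasPrivilegesRequest request = new HasPrivilegesRequest(clusterPrivileges, null, null);

    HasPrivilegesResponse response =
        client.security().hasPrivileges(request, RequestOptions.DEFAULT); // assumed entry point

    boolean hasAll = response.hasAllRequested();
    boolean canMonitor = response.hasClusterPrivilege("monitor");
    // Asking about a privilege that was not part of the request throws
    // IllegalArgumentException rather than returning false.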
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/xpack/XPackInfoResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/xpack/XPackInfoResponse.java index f9a92d2fbbe02..e8fc2ce89b251 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/xpack/XPackInfoResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/xpack/XPackInfoResponse.java @@ -18,20 +18,15 @@ */ package org.elasticsearch.client.xpack; +import org.elasticsearch.client.license.LicenseStatus; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser.ValueType; -import org.elasticsearch.common.xcontent.ToXContentObject; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.client.license.LicenseStatus; import java.io.IOException; -import java.util.ArrayList; import java.util.Collections; -import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -39,12 +34,11 @@ import java.util.Objects; import java.util.Set; import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; -public class XPackInfoResponse implements ToXContentObject { +public class XPackInfoResponse { /** * Value of the license's expiration time if it should never expire. */ @@ -102,7 +96,11 @@ public int hashCode() { @Override public String toString() { - return Strings.toString(this, true, false); + return "XPackInfoResponse{" + + "buildInfo=" + buildInfo + + ", licenseInfo=" + licenseInfo + + ", featureSetsInfo=" + featureSetsInfo + + '}'; } private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( @@ -131,41 +129,12 @@ public String toString() { (p, c, name) -> FeatureSetsInfo.FeatureSet.PARSER.parse(p, name), new ParseField("features")); } + public static XPackInfoResponse fromXContent(XContentParser parser) throws IOException { return PARSER.parse(parser, null); } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - - if (buildInfo != null) { - builder.field("build", buildInfo, params); - } - - EnumSet categories = XPackInfoRequest.Category - .toSet(Strings.splitStringByCommaToArray(params.param("categories", "_all"))); - if (licenseInfo != null) { - builder.field("license", licenseInfo, params); - } else if (categories.contains(XPackInfoRequest.Category.LICENSE)) { - // if the user requested the license info, and there is no license, we should send - // back an explicit null value (indicating there is no license). This is different - // than not adding the license info at all - builder.nullField("license"); - } - - if (featureSetsInfo != null) { - builder.field("features", featureSetsInfo, params); - } - - if (params.paramAsBoolean("human", true)) { - builder.field("tagline", "You know, for X"); - } - - return builder.endObject(); - } - - public static class LicenseInfo implements ToXContentObject { + public static class LicenseInfo { private final String uid; private final String type; private final String mode; @@ -217,6 +186,17 @@ public int hashCode() { return Objects.hash(uid, type, mode, status, expiryDate); } + @Override + public String toString() { + return "LicenseInfo{" + + "uid='" + uid + '\'' + + ", type='" + type + '\'' + + ", mode='" + mode + '\'' + + ", status=" + status + + ", expiryDate=" + expiryDate + + '}'; + } + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "license_info", true, (a, v) -> { String uid = (String) a[0]; @@ -234,22 +214,9 @@ public int hashCode() { PARSER.declareString(constructorArg(), new ParseField("status")); PARSER.declareLong(optionalConstructorArg(), new ParseField("expiry_date_in_millis")); } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject() - .field("uid", uid) - .field("type", type) - .field("mode", mode) - .field("status", status.label()); - if (expiryDate != BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS) { - builder.timeField("expiry_date_in_millis", "expiry_date", expiryDate); - } - return builder.endObject(); - } } - public static class BuildInfo implements ToXContentObject { + public static class BuildInfo { private final String hash; private final String timestamp; @@ -280,23 +247,23 @@ public int hashCode() { return Objects.hash(hash, timestamp); } + @Override + public String toString() { + return "BuildInfo{" + + "hash='" + hash + '\'' + + ", timestamp='" + timestamp + '\'' + + '}'; + } + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "build_info", true, (a, v) -> new BuildInfo((String) a[0], (String) a[1])); static { PARSER.declareString(constructorArg(), new ParseField("hash")); PARSER.declareString(constructorArg(), new
ParseField("date")); } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return builder.startObject() - .field("hash", hash) - .field("date", timestamp) - .endObject(); - } } - public static class FeatureSetsInfo implements ToXContentObject { + public static class FeatureSetsInfo { private final Map featureSets; public FeatureSetsInfo(Set featureSets) { @@ -325,16 +292,13 @@ public int hashCode() { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - List names = new ArrayList<>(this.featureSets.keySet()).stream().sorted().collect(Collectors.toList()); - for (String name : names) { - builder.field(name, featureSets.get(name), params); - } - return builder.endObject(); + public String toString() { + return "FeatureSetsInfo{" + + "featureSets=" + featureSets + + '}'; } - public static class FeatureSet implements ToXContentObject { + public static class FeatureSet { private final String name; @Nullable private final String description; private final boolean available; @@ -389,6 +353,17 @@ public int hashCode() { return Objects.hash(name, description, available, enabled, nativeCodeInfo); } + @Override + public String toString() { + return "FeatureSet{" + + "name='" + name + '\'' + + ", description='" + description + '\'' + + ", available=" + available + + ", enabled=" + enabled + + ", nativeCodeInfo=" + nativeCodeInfo + + '}'; + } + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "feature_set", true, (a, name) -> { String description = (String) a[0]; @@ -404,20 +379,6 @@ public int hashCode() { PARSER.declareBoolean(constructorArg(), new ParseField("enabled")); PARSER.declareObject(optionalConstructorArg(), (p, name) -> p.map(), new ParseField("native_code_info")); } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - if (description != null) { - builder.field("description", description); - } - builder.field("available", available); - builder.field("enabled", enabled); - if (nativeCodeInfo != null) { - builder.field("native_code_info", nativeCodeInfo); - } - return builder.endObject(); - } } } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java index fed0e8921569c..234440bb0547b 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java @@ -192,8 +192,8 @@ public void testExists() throws IOException { assertFalse(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync)); } } - - public void testSourceExists() throws IOException { + + public void testSourceExists() throws IOException { { GetRequest getRequest = new GetRequest("index", "type", "id"); assertFalse(execute(getRequest, highLevelClient()::existsSource, highLevelClient()::existsSourceAsync)); @@ -215,8 +215,8 @@ public void testSourceExists() throws IOException { assertFalse(execute(getRequest, highLevelClient()::existsSource, highLevelClient()::existsSourceAsync)); } } - - public void testSourceDoesNotExist() throws IOException { + + public void testSourceDoesNotExist() throws IOException { final String noSourceIndex = "no_source"; { // Prepare @@ -224,8 +224,8 @@ public void testSourceDoesNotExist() throws IOException { 
.put("number_of_shards", 1) .put("number_of_replicas", 0) .build(); - String mapping = "\"_doc\": { \"_source\": {\n" + - " \"enabled\": false\n" + + String mapping = "\"_doc\": { \"_source\": {\n" + + " \"enabled\": false\n" + " } }"; createIndex(noSourceIndex, settings, mapping); assertEquals( @@ -240,13 +240,13 @@ public void testSourceDoesNotExist() throws IOException { RequestOptions.DEFAULT ).status() ); - } + } { GetRequest getRequest = new GetRequest(noSourceIndex, "_doc", "1"); assertTrue(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync)); assertFalse(execute(getRequest, highLevelClient()::existsSource, highLevelClient()::existsSourceAsync)); } - } + } public void testGet() throws IOException { { @@ -1154,10 +1154,10 @@ public void testTermvectors() throws IOException { } { // test _termvectors on artificial documents - TermVectorsRequest tvRequest = new TermVectorsRequest(sourceIndex, "_doc"); XContentBuilder docBuilder = XContentFactory.jsonBuilder(); docBuilder.startObject().field("field", "valuex").endObject(); - tvRequest.setDoc(docBuilder); + + TermVectorsRequest tvRequest = new TermVectorsRequest(sourceIndex, "_doc", docBuilder); TermVectorsResponse tvResponse = execute(tvRequest, highLevelClient()::termvectors, highLevelClient()::termvectorsAsync); TermVectorsResponse.TermVector.Token expectedToken = new TermVectorsResponse.TermVector.Token(0, 6, 0, null); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndexLifecycleIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndexLifecycleIT.java index 028949b663bfa..9ed19e9afe734 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndexLifecycleIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndexLifecycleIT.java @@ -194,7 +194,7 @@ public void testExplainLifecycle() throws Exception { assertEquals("foo-01", fooResponse.getIndex()); assertEquals("hot", fooResponse.getPhase()); assertEquals("rollover", fooResponse.getAction()); - assertEquals("attempt_rollover", fooResponse.getStep()); + assertEquals("check-rollover-ready", fooResponse.getStep()); assertEquals(new PhaseExecutionInfo(policy.getName(), new Phase("", hotPhase.getMinimumAge(), hotPhase.getActions()), 1L, expectedPolicyModifiedDate), fooResponse.getPhaseExecutionInfo()); IndexLifecycleExplainResponse bazResponse = indexResponses.get("baz-01"); @@ -203,7 +203,7 @@ public void testExplainLifecycle() throws Exception { assertEquals("baz-01", bazResponse.getIndex()); assertEquals("hot", bazResponse.getPhase()); assertEquals("rollover", bazResponse.getAction()); - assertEquals("attempt_rollover", bazResponse.getStep()); + assertEquals("check-rollover-ready", bazResponse.getStep()); IndexLifecycleExplainResponse squashResponse = indexResponses.get("squash"); assertNotNull(squashResponse); assertFalse(squashResponse.managedByILM()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java index bb4822990722e..c683941187751 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.client.ml.DeleteDatafeedRequest; import org.elasticsearch.client.ml.DeleteForecastRequest; import org.elasticsearch.client.ml.DeleteJobRequest; 
+import org.elasticsearch.client.ml.DeleteModelSnapshotRequest; import org.elasticsearch.client.ml.FlushJobRequest; import org.elasticsearch.client.ml.ForecastJobRequest; import org.elasticsearch.client.ml.GetBucketsRequest; @@ -52,6 +53,7 @@ import org.elasticsearch.client.ml.StartDatafeedRequest; import org.elasticsearch.client.ml.StartDatafeedRequestTests; import org.elasticsearch.client.ml.StopDatafeedRequest; +import org.elasticsearch.client.ml.UpdateFilterRequest; import org.elasticsearch.client.ml.UpdateJobRequest; import org.elasticsearch.client.ml.calendars.Calendar; import org.elasticsearch.client.ml.calendars.CalendarTests; @@ -74,6 +76,7 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -360,6 +363,16 @@ public void testDeleteForecast() { request.getParameters().get(DeleteForecastRequest.ALLOW_NO_FORECASTS.getPreferredName())); } + public void testDeleteModelSnapshot() { + String jobId = randomAlphaOfLength(10); + String snapshotId = randomAlphaOfLength(10); + DeleteModelSnapshotRequest deleteModelSnapshotRequest = new DeleteModelSnapshotRequest(jobId, snapshotId); + + Request request = MLRequestConverters.deleteModelSnapshot(deleteModelSnapshotRequest); + assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/model_snapshots/" + snapshotId, request.getEndpoint()); + } + public void testGetBuckets() throws IOException { String jobId = randomAlphaOfLength(10); GetBucketsRequest getBucketsRequest = new GetBucketsRequest(jobId); @@ -566,6 +579,23 @@ public void testGetFilter() throws IOException { assertThat(request.getParameters().get(PageParams.SIZE.getPreferredName()), equalTo("10")); } + public void testUpdateFilter() throws IOException { + String filterId = randomAlphaOfLength(10); + UpdateFilterRequest updateFilterRequest = new UpdateFilterRequest(filterId); + updateFilterRequest.setDescription(randomAlphaOfLength(10)); + updateFilterRequest.setRemoveItems(Arrays.asList("item1", "item2")); + updateFilterRequest.setAddItems(Arrays.asList("item3", "item5")); + + Request request = MLRequestConverters.updateFilter(updateFilterRequest); + + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); + assertThat(request.getEndpoint(), equalTo("/_xpack/ml/filters/"+filterId+"/_update")); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) { + UpdateFilterRequest parsedFilterRequest = UpdateFilterRequest.PARSER.apply(parser, null); + assertThat(parsedFilterRequest, equalTo(updateFilterRequest)); + } + } + private static Job createValidJob(String jobId) { AnalysisConfig.Builder analysisConfig = AnalysisConfig.builder(Collections.singletonList( Detector.builder().setFunction("count").build())); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java index 7bf8969d66b16..5a9c7890fdc47 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java @@ -34,6 +34,7 @@ import org.elasticsearch.client.ml.DeleteForecastRequest; import org.elasticsearch.client.ml.DeleteJobRequest; import org.elasticsearch.client.ml.DeleteJobResponse; +import org.elasticsearch.client.ml.DeleteModelSnapshotRequest; import 
org.elasticsearch.client.ml.FlushJobRequest; import org.elasticsearch.client.ml.FlushJobResponse; import org.elasticsearch.client.ml.ForecastJobRequest; @@ -69,6 +70,7 @@ import org.elasticsearch.client.ml.StopDatafeedRequest; import org.elasticsearch.client.ml.StopDatafeedResponse; import org.elasticsearch.client.ml.UpdateDatafeedRequest; +import org.elasticsearch.client.ml.UpdateFilterRequest; import org.elasticsearch.client.ml.UpdateJobRequest; import org.elasticsearch.client.ml.calendars.Calendar; import org.elasticsearch.client.ml.calendars.CalendarTests; @@ -101,6 +103,7 @@ import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.hasItems; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; @@ -927,6 +930,28 @@ public void testGetFilters() throws Exception { } } + public void testUpdateFilter() throws Exception { + String filterId = "update-filter-test"; + MlFilter mlFilter = MlFilter.builder(filterId) + .setDescription("old description") + .setItems(Arrays.asList("olditem1", "olditem2")) + .build(); + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + machineLearningClient.putFilter(new PutFilterRequest(mlFilter), RequestOptions.DEFAULT); + + UpdateFilterRequest updateFilterRequest = new UpdateFilterRequest(filterId); + + updateFilterRequest.setAddItems(Arrays.asList("newItem1", "newItem2")); + updateFilterRequest.setRemoveItems(Collections.singletonList("olditem1")); + updateFilterRequest.setDescription("new description"); + MlFilter filter = execute(updateFilterRequest, + machineLearningClient::updateFilter, + machineLearningClient::updateFilterAsync).getResponse(); + + assertThat(filter.getDescription(), equalTo(updateFilterRequest.getDescription())); + assertThat(filter.getItems(), contains("newItem1", "newItem2", "olditem2")); + } + public static String randomValidJobId() { CodepointSetGenerator generator = new CodepointSetGenerator("abcdefghijklmnopqrstuvwxyz0123456789".toCharArray()); return generator.ofCodePointsLength(random(), 10, 10); @@ -972,4 +997,38 @@ private String createAndPutDatafeed(String jobId, String indexName) throws IOExc highLevelClient().machineLearning().putDatafeed(new PutDatafeedRequest(datafeed), RequestOptions.DEFAULT); return datafeedId; } + + public void createModelSnapshot(String jobId, String snapshotId) throws IOException { + Job job = MachineLearningIT.buildJob(jobId); + highLevelClient().machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + + IndexRequest indexRequest = new IndexRequest(".ml-anomalies-shared", "doc"); + indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + indexRequest.source("{\"job_id\":\"" + jobId + "\", \"timestamp\":1541587919000, " + + "\"description\":\"State persisted due to job close at 2018-11-07T10:51:59+0000\", " + + "\"snapshot_id\":\"" + snapshotId + "\", \"snapshot_doc_count\":1, \"model_size_stats\":{" + + "\"job_id\":\"" + jobId + "\", \"result_type\":\"model_size_stats\",\"model_bytes\":51722, " + + "\"total_by_field_count\":3, \"total_over_field_count\":0, \"total_partition_field_count\":2," + + "\"bucket_allocation_failures_count\":0, \"memory_status\":\"ok\", \"log_time\":1541587919000, " + + "\"timestamp\":1519930800000}, \"latest_record_time_stamp\":1519931700000," + + "\"latest_result_time_stamp\":1519930800000, \"retain\":false}", XContentType.JSON); + + 
highLevelClient().index(indexRequest, RequestOptions.DEFAULT); + } + + public void testDeleteModelSnapshot() throws IOException { + String jobId = "test-delete-model-snapshot"; + String snapshotId = "1541587919"; + + createModelSnapshot(jobId, snapshotId); + + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + + DeleteModelSnapshotRequest request = new DeleteModelSnapshotRequest(jobId, snapshotId); + + AcknowledgedResponse response = execute(request, machineLearningClient::deleteModelSnapshot, + machineLearningClient::deleteModelSnapshotAsync); + + assertTrue(response.isAcknowledged()); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ReindexIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ReindexIT.java index afc5e99b5f03a..f94cc41432c4c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ReindexIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ReindexIT.java @@ -106,15 +106,19 @@ public void testReindexTask() throws IOException, InterruptedException { ); } { - ReindexRequest reindexRequest = new ReindexRequest(); + // tag::submit-reindex-task + ReindexRequest reindexRequest = new ReindexRequest(); // <1> reindexRequest.setSourceIndices(sourceIndex); reindexRequest.setDestIndex(destinationIndex); - reindexRequest.setSourceQuery(new IdsQueryBuilder().addIds("1").types("type")); reindexRequest.setRefresh(true); - TaskSubmissionResponse reindexSubmission = highLevelClient().submitReindexTask(reindexRequest, RequestOptions.DEFAULT); + TaskSubmissionResponse reindexSubmission = highLevelClient() + .submitReindexTask(reindexRequest, RequestOptions.DEFAULT); // <2> + + String taskId = reindexSubmission.getTask(); // <3> + // end::submit-reindex-task - BooleanSupplier hasUpgradeCompleted = checkCompletionStatus(reindexSubmission.getTask()); + BooleanSupplier hasUpgradeCompleted = checkCompletionStatus(taskId); awaitBusy(hasUpgradeCompleted); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java index cc69abb3bbf3f..f34a73a0991cb 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java @@ -29,8 +29,8 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.client.core.AcknowledgedResponse; import org.elasticsearch.client.rollup.DeleteRollupJobRequest; -import org.elasticsearch.client.rollup.DeleteRollupJobResponse; import org.elasticsearch.client.rollup.GetRollupCapsRequest; import org.elasticsearch.client.rollup.GetRollupCapsResponse; import org.elasticsearch.client.rollup.GetRollupIndexCapsRequest; @@ -40,11 +40,10 @@ import org.elasticsearch.client.rollup.GetRollupJobResponse.IndexerState; import org.elasticsearch.client.rollup.GetRollupJobResponse.JobWrapper; import org.elasticsearch.client.rollup.PutRollupJobRequest; -import org.elasticsearch.client.rollup.PutRollupJobResponse; -import org.elasticsearch.client.rollup.RollableIndexCaps; -import org.elasticsearch.client.rollup.RollupJobCaps; import org.elasticsearch.client.rollup.StartRollupJobRequest; import org.elasticsearch.client.rollup.StartRollupJobResponse; +import org.elasticsearch.client.rollup.RollableIndexCaps; +import 
org.elasticsearch.client.rollup.RollupJobCaps; import org.elasticsearch.client.rollup.StopRollupJobRequest; import org.elasticsearch.client.rollup.StopRollupJobResponse; import org.elasticsearch.client.rollup.job.config.DateHistogramGroupConfig; @@ -158,7 +157,7 @@ public void testDeleteRollupJob() throws Exception { final RollupClient rollupClient = highLevelClient().rollup(); execute(putRollupJobRequest, rollupClient::putRollupJob, rollupClient::putRollupJobAsync); DeleteRollupJobRequest deleteRollupJobRequest = new DeleteRollupJobRequest(id); - DeleteRollupJobResponse deleteRollupJobResponse = highLevelClient().rollup() + AcknowledgedResponse deleteRollupJobResponse = highLevelClient().rollup() .deleteRollupJob(deleteRollupJobRequest, RequestOptions.DEFAULT); assertTrue(deleteRollupJobResponse.isAcknowledged()); } @@ -180,7 +179,7 @@ public void testPutStartAndGetRollupJob() throws Exception { new PutRollupJobRequest(new RollupJobConfig(id, indexPattern, rollupIndex, cron, pageSize, groups, metrics, timeout)); final RollupClient rollupClient = highLevelClient().rollup(); - PutRollupJobResponse response = execute(putRollupJobRequest, rollupClient::putRollupJob, rollupClient::putRollupJobAsync); + AcknowledgedResponse response = execute(putRollupJobRequest, rollupClient::putRollupJob, rollupClient::putRollupJobAsync); assertTrue(response.isAcknowledged()); StartRollupJobRequest startRequest = new StartRollupJobRequest(id); @@ -235,8 +234,14 @@ public void testPutStartAndGetRollupJob() throws Exception { // stop the job StopRollupJobRequest stopRequest = new StopRollupJobRequest(id); + stopRequest.waitForCompletion(randomBoolean()); StopRollupJobResponse stopResponse = execute(stopRequest, rollupClient::stopRollupJob, rollupClient::stopRollupJobAsync); assertTrue(stopResponse.isAcknowledged()); + if (stopRequest.waitForCompletion()) { + getResponse = execute(new GetRollupJobRequest(id), rollupClient::getRollupJob, rollupClient::getRollupJobAsync); + assertThat(getResponse.getJobs(), hasSize(1)); + assertThat(getResponse.getJobs().get(0).getStatus().getState(), equalTo(IndexerState.STOPPED)); + } } public void testGetMissingRollupJob() throws Exception { @@ -307,7 +312,7 @@ public void testGetRollupCaps() throws Exception { new PutRollupJobRequest(new RollupJobConfig(id, indexPattern, rollupIndex, cron, pageSize, groups, metrics, timeout)); final RollupClient rollupClient = highLevelClient().rollup(); - PutRollupJobResponse response = execute(putRollupJobRequest, rollupClient::putRollupJob, rollupClient::putRollupJobAsync); + AcknowledgedResponse response = execute(putRollupJobRequest, rollupClient::putRollupJob, rollupClient::putRollupJobAsync); assertTrue(response.isAcknowledged()); // wait for the PutJob api to create the index w/ metadata @@ -419,7 +424,7 @@ public void testGetRollupIndexCaps() throws Exception { new PutRollupJobRequest(new RollupJobConfig(id, indexPattern, rollupIndex, cron, pageSize, groups, metrics, timeout)); final RollupClient rollupClient = highLevelClient().rollup(); - PutRollupJobResponse response = execute(putRollupJobRequest, rollupClient::putRollupJob, rollupClient::putRollupJobAsync); + AcknowledgedResponse response = execute(putRollupJobRequest, rollupClient::putRollupJob, rollupClient::putRollupJobAsync); assertTrue(response.isAcknowledged()); // wait for the PutJob api to create the index w/ metadata diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupRequestConvertersTests.java 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupRequestConvertersTests.java index 12907f0f3b655..b4edeb46422ff 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupRequestConvertersTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.client.rollup.StopRollupJobRequest; import org.elasticsearch.client.rollup.job.config.RollupJobConfig; import org.elasticsearch.client.rollup.job.config.RollupJobConfigTests; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -64,13 +65,27 @@ public void testStartJob() throws IOException { public void testStopJob() throws IOException { String jobId = randomAlphaOfLength(5); StopRollupJobRequest stopJob = new StopRollupJobRequest(jobId); + String expectedTimeOutString = null; + String expectedWaitForCompletion = null; + int expectedParameters = 0; + if (randomBoolean()) { + stopJob.timeout(TimeValue.parseTimeValue(randomPositiveTimeValue(), "timeout")); + expectedTimeOutString = stopJob.timeout().getStringRep(); + expectedParameters++; + } + if (randomBoolean()) { + stopJob.waitForCompletion(randomBoolean()); + expectedWaitForCompletion = stopJob.waitForCompletion().toString(); + expectedParameters++; + } Request request = RollupRequestConverters.stopJob(stopJob); assertThat(request.getEndpoint(), equalTo("/_xpack/rollup/job/" + jobId + "/_stop")); assertThat(HttpPost.METHOD_NAME, equalTo(request.getMethod())); - assertThat(request.getParameters().keySet(), empty()); + assertThat(request.getParameters().keySet().size(), equalTo(expectedParameters)); + assertThat(request.getParameters().get("timeout"), equalTo(expectedTimeOutString)); + assertThat(request.getParameters().get("wait_for_completion"), equalTo(expectedWaitForCompletion)); assertNull(request.getEntity()); }
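With PutRollupJobResponse and DeleteRollupJobResponse removed, put and delete rollup job now answer with the shared org.elasticsearch.client.core.AcknowledgedResponse, whose wire format the reworked test below covers. A hedged sketch of the consolidated call sites, where client is an existing RestHighLevelClient and config is a previously built RollupJobConfig (both assumed):

    import org.elasticsearch.client.RequestOptions;
    import org.elasticsearch.client.core.AcknowledgedResponse;
    import org.elasticsearch.client.rollup.DeleteRollupJobRequest;
    import org.elasticsearch.client.rollup.PutRollupJobRequest;

    // Both operations now report success through the same response type.
    AcknowledgedResponse putResponse = client.rollup()
        .putRollupJob(new PutRollupJobRequest(config), RequestOptions.DEFAULT);
    boolean putAcknowledged = putResponse.isAcknowledged();

    AcknowledgedResponse deleteResponse = client.rollup()
        .deleteRollupJob(new DeleteRollupJobRequest("job_1"), RequestOptions.DEFAULT);
    boolean deleteAcknowledged = deleteResponse.isAcknowledged();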
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/core/AcknowledgedResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/core/AcknowledgedResponseTests.java index 36ba953073987..c05ea470c219f 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/core/AcknowledgedResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/core/AcknowledgedResponseTests.java @@ -18,26 +18,33 @@ */ package org.elasticsearch.client.core; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.ESTestCase; import java.io.IOException; -public class AcknowledgedResponseTests extends AbstractXContentTestCase { +import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; - @Override - protected AcknowledgedResponse createTestInstance() { - return new AcknowledgedResponse(randomBoolean()); - } +public class AcknowledgedResponseTests extends ESTestCase { - @Override - protected AcknowledgedResponse doParseInstance(XContentParser parser) throws IOException { - return AcknowledgedResponse.fromXContent(parser); + public void testFromXContent() throws IOException { + xContentTester(this::createParser, + this::createTestInstance, + AcknowledgedResponseTests::toXContent, + AcknowledgedResponse::fromXContent) + .supportsUnknownFields(false) + .test(); + } + private AcknowledgedResponse createTestInstance() { + return new AcknowledgedResponse(randomBoolean()); } - @Override - protected boolean supportsUnknownFields() { - return false; + public static void toXContent(AcknowledgedResponse response, XContentBuilder builder) throws IOException { + builder.startObject(); + { + builder.field(response.getFieldName(), response.isAcknowledged()); + } + builder.endObject(); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java index 29bb860df7307..b71316c43f260 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java @@ -1565,10 +1565,12 @@ public void testTermVectors() throws Exception { { // tag::term-vectors-request-artificial - TermVectorsRequest request = new TermVectorsRequest("authors", "_doc"); + XContentBuilder docBuilder = XContentFactory.jsonBuilder(); docBuilder.startObject().field("user", "guest-user").endObject(); - request.setDoc(docBuilder); // <1> + TermVectorsRequest request = new TermVectorsRequest("authors", + "_doc", + docBuilder); // <1> // end::term-vectors-request-artificial // tag::term-vectors-request-optional-arguments diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ILMDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ILMDocumentationIT.java index 69bfbc11f766b..4ff50ba33e752 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ILMDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ILMDocumentationIT.java @@ -29,11 +29,16 @@ import org.elasticsearch.client.core.AcknowledgedResponse; import org.elasticsearch.client.indexlifecycle.DeleteAction; import org.elasticsearch.client.indexlifecycle.DeleteLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.GetLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.GetLifecyclePolicyResponse; import org.elasticsearch.client.indexlifecycle.LifecycleAction; import org.elasticsearch.client.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.client.indexlifecycle.LifecyclePolicyMetadata; import org.elasticsearch.client.indexlifecycle.Phase; import org.elasticsearch.client.indexlifecycle.PutLifecyclePolicyRequest; import org.elasticsearch.client.indexlifecycle.RolloverAction; +import org.elasticsearch.client.indexlifecycle.ShrinkAction; +import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; @@ -119,6 +124,108 @@ public void onFailure(Exception e) { } + public void testGetLifecyclePolicy() throws IOException, InterruptedException { + RestHighLevelClient client = highLevelClient(); + + LifecyclePolicy myPolicyAsPut; + LifecyclePolicy otherPolicyAsPut; + // Set up some policies so we have something to get + { + Map phases = new HashMap<>(); + Map hotActions = new HashMap<>(); + hotActions.put(RolloverAction.NAME, new RolloverAction( + new ByteSizeValue(50, ByteSizeUnit.GB), null, null)); + phases.put("hot", new Phase("hot", TimeValue.ZERO, hotActions)); + + Map deleteActions = + Collections.singletonMap(DeleteAction.NAME, + new DeleteAction()); + phases.put("delete", +
new Phase("delete", + new TimeValue(90, TimeUnit.DAYS), deleteActions)); + + myPolicyAsPut = new LifecyclePolicy("my_policy", phases); + PutLifecyclePolicyRequest putRequest = new PutLifecyclePolicyRequest(myPolicyAsPut); + + Map otherPolicyPhases = new HashMap<>(phases); + Map warmActions = Collections.singletonMap(ShrinkAction.NAME, new ShrinkAction(1)); + otherPolicyPhases.put("warm", new Phase("warm", new TimeValue(30, TimeUnit.DAYS), warmActions)); + otherPolicyAsPut = new LifecyclePolicy("other_policy", otherPolicyPhases); + + PutLifecyclePolicyRequest putRequest2 = new PutLifecyclePolicyRequest(otherPolicyAsPut); + + AcknowledgedResponse putResponse = client.indexLifecycle(). + putLifecyclePolicy(putRequest, RequestOptions.DEFAULT); + assertTrue(putResponse.isAcknowledged()); + AcknowledgedResponse putResponse2 = client.indexLifecycle(). + putLifecyclePolicy(putRequest2, RequestOptions.DEFAULT); + assertTrue(putResponse2.isAcknowledged()); + } + + // tag::ilm-get-lifecycle-policy-request + GetLifecyclePolicyRequest allRequest = + new GetLifecyclePolicyRequest(); // <1> + GetLifecyclePolicyRequest request = + new GetLifecyclePolicyRequest("my_policy", "other_policy"); // <2> + // end::ilm-get-lifecycle-policy-request + + // tag::ilm-get-lifecycle-policy-execute + GetLifecyclePolicyResponse response = client.indexLifecycle() + .getLifecyclePolicy(request, RequestOptions.DEFAULT); + // end::ilm-get-lifecycle-policy-execute + + // tag::ilm-get-lifecycle-policy-response + ImmutableOpenMap policies = + response.getPolicies(); + LifecyclePolicyMetadata myPolicyMetadata = + policies.get("my_policy"); // <1> + String myPolicyName = myPolicyMetadata.getName(); + long version = myPolicyMetadata.getVersion(); + String lastModified = myPolicyMetadata.getModifiedDateString(); + long lastModifiedDate = myPolicyMetadata.getModifiedDate(); + LifecyclePolicy myPolicy = myPolicyMetadata.getPolicy(); // <2> + // end::ilm-get-lifecycle-policy-response + + assertEquals(myPolicyAsPut, myPolicy); + assertEquals("my_policy", myPolicyName); + assertNotNull(lastModified); + assertNotEquals(0, lastModifiedDate); + + LifecyclePolicyMetadata otherPolicyMetadata = policies.get("other_policy"); + assertEquals(otherPolicyAsPut, otherPolicyMetadata.getPolicy()); + assertEquals("other_policy", otherPolicyMetadata.getName()); + assertNotNull(otherPolicyMetadata.getModifiedDateString()); + assertNotEquals(0, otherPolicyMetadata.getModifiedDate()); + + // tag::ilm-get-lifecycle-policy-execute-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(GetLifecyclePolicyResponse response) + { + ImmutableOpenMap + policies = response.getPolicies(); // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::ilm-get-lifecycle-policy-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::ilm-get-lifecycle-policy-execute-async + client.indexLifecycle().getLifecyclePolicyAsync(request, + RequestOptions.DEFAULT, listener); // <1> + // end::ilm-get-lifecycle-policy-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + static Map toMap(Response response) throws IOException { return XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(response.getEntity()), false); } diff --git 
a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java index e6a8976a3c75c..556e25a2b0ef5 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java @@ -40,6 +40,7 @@ import org.elasticsearch.client.ml.DeleteForecastRequest; import org.elasticsearch.client.ml.DeleteJobRequest; import org.elasticsearch.client.ml.DeleteJobResponse; +import org.elasticsearch.client.ml.DeleteModelSnapshotRequest; import org.elasticsearch.client.ml.FlushJobRequest; import org.elasticsearch.client.ml.FlushJobResponse; import org.elasticsearch.client.ml.ForecastJobRequest; @@ -87,12 +88,14 @@ import org.elasticsearch.client.ml.StopDatafeedRequest; import org.elasticsearch.client.ml.StopDatafeedResponse; import org.elasticsearch.client.ml.UpdateDatafeedRequest; +import org.elasticsearch.client.ml.UpdateFilterRequest; import org.elasticsearch.client.ml.UpdateJobRequest; import org.elasticsearch.client.ml.calendars.Calendar; import org.elasticsearch.client.ml.datafeed.ChunkingConfig; import org.elasticsearch.client.ml.datafeed.DatafeedConfig; import org.elasticsearch.client.ml.datafeed.DatafeedStats; import org.elasticsearch.client.ml.datafeed.DatafeedUpdate; +import org.elasticsearch.client.ml.datafeed.DelayedDataCheckConfig; import org.elasticsearch.client.ml.job.config.AnalysisConfig; import org.elasticsearch.client.ml.job.config.AnalysisLimits; import org.elasticsearch.client.ml.job.config.DataDescription; @@ -581,6 +584,14 @@ public void testPutDatafeed() throws Exception { datafeedBuilder.setQueryDelay(TimeValue.timeValueMinutes(1)); // <1> // end::put-datafeed-config-set-query-delay + // tag::put-datafeed-config-set-delayed-data-check-config + datafeedBuilder.setDelayedDataCheckConfig(DelayedDataCheckConfig + .enabledDelayedDataCheckConfig(TimeValue.timeValueHours(1))); // <1> + // end::put-datafeed-config-set-delayed-data-check-config + + // no need to accidentally trip internal validations due to job bucket size + datafeedBuilder.setDelayedDataCheckConfig(null); + List scriptFields = Collections.emptyList(); // tag::put-datafeed-config-set-script-fields datafeedBuilder.setScriptFields(scriptFields); // <1> @@ -1866,6 +1877,73 @@ public void onFailure(Exception e) { } } + public void testDeleteModelSnapshot() throws IOException, InterruptedException { + RestHighLevelClient client = highLevelClient(); + + String jobId = "test-delete-model-snapshot"; + String snapshotId = "1541587919"; + Job job = MachineLearningIT.buildJob(jobId); + client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + + // Let us index a snapshot + IndexRequest indexRequest = new IndexRequest(".ml-anomalies-shared", "doc"); + indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + indexRequest.source("{\"job_id\":\"" + jobId + "\", \"timestamp\":1541587919000, " + + "\"description\":\"State persisted due to job close at 2018-11-07T10:51:59+0000\", " + + "\"snapshot_id\":\"" + snapshotId + "\", \"snapshot_doc_count\":1, \"model_size_stats\":{" + + "\"job_id\":\"" + jobId + "\", \"result_type\":\"model_size_stats\",\"model_bytes\":51722, " + + "\"total_by_field_count\":3, \"total_over_field_count\":0, \"total_partition_field_count\":2," + + 
"\"bucket_allocation_failures_count\":0, \"memory_status\":\"ok\", \"log_time\":1541587919000, " + + "\"timestamp\":1519930800000}, \"latest_record_time_stamp\":1519931700000," + + "\"latest_result_time_stamp\":1519930800000, \"retain\":false}", XContentType.JSON); + { + client.index(indexRequest, RequestOptions.DEFAULT); + + // tag::delete-model-snapshot-request + DeleteModelSnapshotRequest request = new DeleteModelSnapshotRequest(jobId, snapshotId); // <1> + // end::delete-model-snapshot-request + + // tag::delete-model-snapshot-execute + AcknowledgedResponse response = client.machineLearning().deleteModelSnapshot(request, RequestOptions.DEFAULT); + // end::delete-model-snapshot-execute + + // tag::delete-model-snapshot-response + boolean isAcknowledged = response.isAcknowledged(); // <1> + // end::delete-model-snapshot-response + + assertTrue(isAcknowledged); + } + { + client.index(indexRequest, RequestOptions.DEFAULT); + + // tag::delete-model-snapshot-execute-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(AcknowledgedResponse acknowledgedResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::delete-model-snapshot-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + DeleteModelSnapshotRequest deleteModelSnapshotRequest = new DeleteModelSnapshotRequest(jobId, "1541587919"); + + // tag::delete-model-snapshot-execute-async + client.machineLearning().deleteModelSnapshotAsync(deleteModelSnapshotRequest, RequestOptions.DEFAULT, listener); // <1> + // end::delete-model-snapshot-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + public void testGetModelSnapshots() throws IOException, InterruptedException { RestHighLevelClient client = highLevelClient(); @@ -2229,4 +2307,66 @@ public void onFailure(Exception e) { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } } + + public void testUpdateFilter() throws IOException, InterruptedException { + RestHighLevelClient client = highLevelClient(); + String filterId = "update-filter-doc-test"; + MlFilter.Builder filterBuilder = MlFilter.builder(filterId).setDescription("test").setItems("*.google.com", "wikipedia.org"); + + client.machineLearning().putFilter(new PutFilterRequest(filterBuilder.build()), RequestOptions.DEFAULT); + + { + // tag::update-filter-request + UpdateFilterRequest request = new UpdateFilterRequest(filterId); // <1> + // end::update-filter-request + + // tag::update-filter-description + request.setDescription("my new description"); // <1> + // end::update-filter-description + + // tag::update-filter-add-items + request.setAddItems(Arrays.asList("*.bing.com", "*.elastic.co")); // <1> + // end::update-filter-add-items + + // tag::update-filter-remove-items + request.setRemoveItems(Arrays.asList("*.google.com")); // <1> + // end::update-filter-remove-items + + // tag::update-filter-execute + PutFilterResponse response = client.machineLearning().updateFilter(request, RequestOptions.DEFAULT); + // end::update-filter-execute + + // tag::update-filter-response + MlFilter updatedFilter = response.getResponse(); // <1> + // end::update-filter-response + assertEquals(request.getDescription(), updatedFilter.getDescription()); + } + { + UpdateFilterRequest request = new UpdateFilterRequest(filterId); + + // tag::update-filter-execute-listener + ActionListener listener = new 
ActionListener() { + @Override + public void onResponse(PutFilterResponse putFilterResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::update-filter-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::update-filter-execute-async + client.machineLearning().updateFilterAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::update-filter-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/RollupDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/RollupDocumentationIT.java index 6ace43099309b..c750e087157b2 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/RollupDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/RollupDocumentationIT.java @@ -32,8 +32,8 @@ import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.client.RollupClient; +import org.elasticsearch.client.core.AcknowledgedResponse; import org.elasticsearch.client.rollup.DeleteRollupJobRequest; -import org.elasticsearch.client.rollup.DeleteRollupJobResponse; import org.elasticsearch.client.rollup.GetRollupCapsRequest; import org.elasticsearch.client.rollup.GetRollupCapsResponse; import org.elasticsearch.client.rollup.GetRollupIndexCapsRequest; @@ -44,7 +44,6 @@ import org.elasticsearch.client.rollup.GetRollupJobResponse.RollupIndexerJobStats; import org.elasticsearch.client.rollup.GetRollupJobResponse.RollupJobStatus; import org.elasticsearch.client.rollup.PutRollupJobRequest; -import org.elasticsearch.client.rollup.PutRollupJobResponse; import org.elasticsearch.client.rollup.RollableIndexCaps; import org.elasticsearch.client.rollup.RollupJobCaps; import org.elasticsearch.client.rollup.StartRollupJobRequest; @@ -148,7 +147,7 @@ public void testCreateRollupJob() throws Exception { //end::x-pack-rollup-put-rollup-job-request //tag::x-pack-rollup-put-rollup-job-execute - PutRollupJobResponse response = client.rollup().putRollupJob(request, RequestOptions.DEFAULT); + AcknowledgedResponse response = client.rollup().putRollupJob(request, RequestOptions.DEFAULT); //end::x-pack-rollup-put-rollup-job-execute //tag::x-pack-rollup-put-rollup-job-response @@ -161,9 +160,9 @@ public void testCreateRollupJob() throws Exception { RollupJobConfig config = new RollupJobConfig(id, indexPattern, rollupIndex, cron, pageSize, groups, metrics, timeout); PutRollupJobRequest request = new PutRollupJobRequest(config); // tag::x-pack-rollup-put-rollup-job-execute-listener - ActionListener listener = new ActionListener() { + ActionListener listener = new ActionListener() { @Override - public void onResponse(PutRollupJobResponse response) { + public void onResponse(AcknowledgedResponse response) { // <1> } @@ -288,6 +287,8 @@ public void testStopRollupJob() throws Exception { String id = "job_1"; // tag::rollup-stop-job-request StopRollupJobRequest request = new StopRollupJobRequest(id); // <1> + request.waitForCompletion(true); // <2> + request.timeout(TimeValue.timeValueSeconds(10)); // <3> // end::rollup-stop-job-request @@ -354,7 +355,7 @@ public void testGetRollupCaps() throws Exception { pageSize, groups, metrics, timeout); 
PutRollupJobRequest request = new PutRollupJobRequest(config); - PutRollupJobResponse response = client.rollup().putRollupJob(request, RequestOptions.DEFAULT); + AcknowledgedResponse response = client.rollup().putRollupJob(request, RequestOptions.DEFAULT); boolean acknowledged = response.isAcknowledged(); //end::x-pack-rollup-get-rollup-caps-setup @@ -470,7 +471,7 @@ public void testGetRollupIndexCaps() throws Exception { pageSize, groups, metrics, timeout); PutRollupJobRequest request = new PutRollupJobRequest(config); - PutRollupJobResponse response = client.rollup().putRollupJob(request, RequestOptions.DEFAULT); + AcknowledgedResponse response = client.rollup().putRollupJob(request, RequestOptions.DEFAULT); boolean acknowledged = response.isAcknowledged(); //end::x-pack-rollup-get-rollup-index-caps-setup @@ -570,7 +571,7 @@ public void testDeleteRollupJob() throws Exception { // end::rollup-delete-job-request try { // tag::rollup-delete-job-execute - DeleteRollupJobResponse response = client.rollup().deleteRollupJob(request, RequestOptions.DEFAULT); + AcknowledgedResponse response = client.rollup().deleteRollupJob(request, RequestOptions.DEFAULT); // end::rollup-delete-job-execute // tag::rollup-delete-job-response @@ -581,9 +582,9 @@ public void testDeleteRollupJob() throws Exception { } // tag::rollup-delete-job-execute-listener - ActionListener listener = new ActionListener() { + ActionListener listener = new ActionListener() { @Override - public void onResponse(DeleteRollupJobResponse response) { + public void onResponse(AcknowledgedResponse response) { boolean acknowledged = response.isAcknowledged(); // <1> } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java index 831c39ed28be6..766385c99aa71 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java @@ -174,8 +174,8 @@ public void testSearch() throws Exception { sourceBuilder.fetchSource(false); // end::search-source-filtering-off // tag::search-source-filtering-includes - String[] includeFields = new String[] {"title", "user", "innerObject.*"}; - String[] excludeFields = new String[] {"_type"}; + String[] includeFields = new String[] {"title", "innerObject.*"}; + String[] excludeFields = new String[] {"user"}; sourceBuilder.fetchSource(includeFields, excludeFields); // end::search-source-filtering-includes sourceBuilder.fetchSource(true); @@ -247,7 +247,6 @@ public void onFailure(Exception e) { for (SearchHit hit : searchHits) { // tag::search-hits-singleHit-properties String index = hit.getIndex(); - String type = hit.getType(); String id = hit.getId(); float score = hit.getScore(); // end::search-hits-singleHit-properties @@ -263,8 +262,8 @@ public void onFailure(Exception e) { assertEquals(3, totalHits); assertNotNull(hits.getHits()[0].getSourceAsString()); assertNotNull(hits.getHits()[0].getSourceAsMap().get("title")); - assertNotNull(hits.getHits()[0].getSourceAsMap().get("user")); assertNotNull(hits.getHits()[0].getSourceAsMap().get("innerObject")); + assertNull(hits.getHits()[0].getSourceAsMap().get("user")); } } @@ -1242,18 +1241,6 @@ public void onFailure(Exception e) { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } - { - // tag::multi-search-request-index - MultiSearchRequest request = 
new MultiSearchRequest(); - request.add(new SearchRequest("posts") // <1> - .types("doc")); // <2> - // end::multi-search-request-index - MultiSearchResponse response = client.msearch(request, RequestOptions.DEFAULT); - MultiSearchResponse.Item firstResponse = response.getResponses()[0]; - assertNull(firstResponse.getFailure()); - SearchResponse searchResponse = firstResponse.getResponse(); - assertEquals(3, searchResponse.getHits().getTotalHits()); - } } private void indexSearchTestData() throws IOException { @@ -1304,19 +1291,12 @@ public void testCount() throws Exception { // end::count-request-basic } { - // tag::count-request-indices-types - CountRequest countRequest = new CountRequest("blog"); // <1> - countRequest.types("doc"); // <2> - // end::count-request-indices-types - // tag::count-request-routing - countRequest.routing("routing"); // <1> - // end::count-request-routing - // tag::count-request-indicesOptions - countRequest.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1> - // end::count-request-indicesOptions - // tag::count-request-preference - countRequest.preference("_local"); // <1> - // end::count-request-preference + // tag::count-request-args + CountRequest countRequest = new CountRequest("blog") // <1> + .routing("routing") // <2> + .indicesOptions(IndicesOptions.lenientExpandOpen()) // <3> + .preference("_local"); // <4> + // end::count-request-args assertNotNull(client.count(countRequest, RequestOptions.DEFAULT)); } { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java index 71cfdd4ba5b89..39f57706a3667 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java @@ -51,6 +51,8 @@ import org.elasticsearch.client.security.GetRoleMappingsRequest; import org.elasticsearch.client.security.GetRoleMappingsResponse; import org.elasticsearch.client.security.GetSslCertificatesResponse; +import org.elasticsearch.client.security.HasPrivilegesRequest; +import org.elasticsearch.client.security.HasPrivilegesResponse; import org.elasticsearch.client.security.InvalidateTokenRequest; import org.elasticsearch.client.security.InvalidateTokenResponse; import org.elasticsearch.client.security.PutRoleMappingRequest; @@ -63,7 +65,9 @@ import org.elasticsearch.client.security.support.expressiondsl.expressions.AnyRoleMapperExpression; import org.elasticsearch.client.security.support.expressiondsl.fields.FieldRoleMapperExpression; import org.elasticsearch.client.security.user.User; +import org.elasticsearch.client.security.user.privileges.IndicesPrivileges; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.RestStatus; import org.hamcrest.Matchers; @@ -80,6 +84,7 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.emptyIterable; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.isIn; @@ -437,6 +442,67 @@ public void onFailure(Exception e) { } } + public void testHasPrivileges() throws Exception { + 
RestHighLevelClient client = highLevelClient(); + { + //tag::has-privileges-request + HasPrivilegesRequest request = new HasPrivilegesRequest( + Sets.newHashSet("monitor", "manage"), + Sets.newHashSet( + IndicesPrivileges.builder().indices("logstash-2018-10-05").privileges("read", "write").build(), + IndicesPrivileges.builder().indices("logstash-2018-*").privileges("read").build() + ), + null + ); + //end::has-privileges-request + + //tag::has-privileges-execute + HasPrivilegesResponse response = client.security().hasPrivileges(request, RequestOptions.DEFAULT); + //end::has-privileges-execute + + //tag::has-privileges-response + boolean hasMonitor = response.hasClusterPrivilege("monitor"); // <1> + boolean hasWrite = response.hasIndexPrivilege("logstash-2018-10-05", "write"); // <2> + boolean hasRead = response.hasIndexPrivilege("logstash-2018-*", "read"); // <3> + //end::has-privileges-response + + assertThat(response.getUsername(), is("test_user")); + assertThat(response.hasAllRequested(), is(true)); + assertThat(hasMonitor, is(true)); + assertThat(hasWrite, is(true)); + assertThat(hasRead, is(true)); + assertThat(response.getApplicationPrivileges().entrySet(), emptyIterable()); + } + + { + HasPrivilegesRequest request = new HasPrivilegesRequest(Collections.singleton("monitor"),null,null); + + // tag::has-privileges-execute-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(HasPrivilegesResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::has-privileges-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::has-privileges-execute-async + client.security().hasPrivilegesAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::has-privileges-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + public void testClearRealmCache() throws Exception { RestHighLevelClient client = highLevelClient(); { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/DeleteModelSnapshotRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/DeleteModelSnapshotRequestTests.java new file mode 100644 index 0000000000000..4e02344d76850 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/DeleteModelSnapshotRequestTests.java @@ -0,0 +1,40 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.test.ESTestCase; + +public class DeleteModelSnapshotRequestTests extends ESTestCase { + + public void test_WithNullJobId() { + NullPointerException ex = expectThrows(NullPointerException.class, () -> + new DeleteModelSnapshotRequest(null, randomAlphaOfLength(10))); + assertEquals("[job_id] must not be null", ex.getMessage()); + } + + public void test_WithNullSnapshotId() { + NullPointerException ex = expectThrows(NullPointerException.class, () + -> new DeleteModelSnapshotRequest(randomAlphaOfLength(10), null)); + assertEquals("[snapshot_id] must not be null", ex.getMessage()); + } + + private DeleteModelSnapshotRequest createTestInstance() { + return new DeleteModelSnapshotRequest(randomAlphaOfLength(10), randomAlphaOfLength(10)); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/UpdateFilterRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/UpdateFilterRequestTests.java new file mode 100644 index 0000000000000..ee340c03d0820 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/UpdateFilterRequestTests.java @@ -0,0 +1,64 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.util.ArrayList; +import java.util.List; + + +public class UpdateFilterRequestTests extends AbstractXContentTestCase { + + @Override + protected UpdateFilterRequest createTestInstance() { + UpdateFilterRequest request = new UpdateFilterRequest(randomAlphaOfLength(10)); + if (randomBoolean()) { + request.setDescription(randomAlphaOfLength(10)); + } + if (randomBoolean()) { + int items = randomInt(10); + List strings = new ArrayList<>(items); + for (int i = 0; i < items; i++) { + strings.add(randomAlphaOfLength(10)); + } + request.setAddItems(strings); + } + if (randomBoolean()) { + int items = randomInt(10); + List strings = new ArrayList<>(items); + for (int i = 0; i < items; i++) { + strings.add(randomAlphaOfLength(10)); + } + request.setRemoveItems(strings); + } + return request; + } + + @Override + protected UpdateFilterRequest doParseInstance(XContentParser parser) { + return UpdateFilterRequest.PARSER.apply(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedConfigTests.java index 7f92d1690f91a..c7fb1c80388be 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedConfigTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedConfigTests.java @@ -103,6 +103,9 @@ public static DatafeedConfig.Builder createRandomBuilder() { if (randomBoolean()) { builder.setChunkingConfig(ChunkingConfigTests.createRandomizedChunk()); } + if (randomBoolean()) { + builder.setDelayedDataCheckConfig(DelayedDataCheckConfigTests.createRandomizedConfig()); + } return builder; } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdateTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdateTests.java index 1f1675a330e1b..85a96b0a9e244 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdateTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdateTests.java @@ -83,6 +83,9 @@ public static DatafeedUpdate createRandom() { if (randomBoolean()) { builder.setChunkingConfig(ChunkingConfigTests.createRandomizedChunk()); } + if (randomBoolean()) { + builder.setDelayedDataCheckConfig(DelayedDataCheckConfigTests.createRandomizedConfig()); + } return builder.build(); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DelayedDataCheckConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DelayedDataCheckConfigTests.java new file mode 100644 index 0000000000000..1e85c28149b28 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DelayedDataCheckConfigTests.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml.datafeed; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import static org.hamcrest.Matchers.equalTo; + +public class DelayedDataCheckConfigTests extends AbstractXContentTestCase { + + @Override + protected DelayedDataCheckConfig createTestInstance() { + return createRandomizedConfig(); + } + + @Override + protected DelayedDataCheckConfig doParseInstance(XContentParser parser) { + return DelayedDataCheckConfig.PARSER.apply(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + public void testEnabledDelayedDataCheckConfig() { + DelayedDataCheckConfig delayedDataCheckConfig = DelayedDataCheckConfig.enabledDelayedDataCheckConfig(TimeValue.timeValueHours(5)); + assertThat(delayedDataCheckConfig.isEnabled(), equalTo(true)); + assertThat(delayedDataCheckConfig.getCheckWindow(), equalTo(TimeValue.timeValueHours(5))); + } + + public void testDisabledDelayedDataCheckConfig() { + DelayedDataCheckConfig delayedDataCheckConfig = DelayedDataCheckConfig.disabledDelayedDataCheckConfig(); + assertThat(delayedDataCheckConfig.isEnabled(), equalTo(false)); + assertThat(delayedDataCheckConfig.getCheckWindow(), equalTo(null)); + } + + public static DelayedDataCheckConfig createRandomizedConfig() { + boolean enabled = randomBoolean(); + TimeValue timeWindow = null; + if (enabled || randomBoolean()) { + timeWindow = TimeValue.timeValueMillis(randomLongBetween(1, 1_000)); + } + return new DelayedDataCheckConfig(enabled, timeWindow); + } +} + diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/DeleteRollupJobResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/DeleteRollupJobResponseTests.java deleted file mode 100644 index 1dc02ff386de0..0000000000000 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/DeleteRollupJobResponseTests.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.client.rollup; - -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.test.AbstractXContentTestCase; -import org.junit.Before; - -import java.io.IOException; - -public class DeleteRollupJobResponseTests extends AbstractXContentTestCase { - - private boolean acknowledged; - - @Before - public void setupJobID() { - acknowledged = randomBoolean(); - } - - @Override - protected DeleteRollupJobResponse createTestInstance() { - return new DeleteRollupJobResponse(acknowledged); - } - - @Override - protected DeleteRollupJobResponse doParseInstance(XContentParser parser) throws IOException { - return DeleteRollupJobResponse.fromXContent(parser); - } - - @Override - protected boolean supportsUnknownFields() { - return false; - } - -} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/StartRollupJobResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/StartRollupJobResponseTests.java index 724e60d2d4a75..6ef7152fbbcc6 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/StartRollupJobResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/StartRollupJobResponseTests.java @@ -18,34 +18,25 @@ */ package org.elasticsearch.client.rollup; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.test.AbstractXContentTestCase; -import org.junit.Before; +import org.elasticsearch.client.core.AcknowledgedResponseTests; +import org.elasticsearch.test.ESTestCase; import java.io.IOException; -public class StartRollupJobResponseTests extends AbstractXContentTestCase { +import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; - private boolean acknowledged; +public class StartRollupJobResponseTests extends ESTestCase { - @Before - public void setupAcknoledged() { - acknowledged = randomBoolean(); + public void testFromXContent() throws IOException { + xContentTester(this::createParser, + this::createTestInstance, + AcknowledgedResponseTests::toXContent, + StartRollupJobResponse::fromXContent) + .supportsUnknownFields(false) + .test(); } - - @Override - protected StartRollupJobResponse createTestInstance() { - return new StartRollupJobResponse(acknowledged); - } - - @Override - protected StartRollupJobResponse doParseInstance(XContentParser parser) throws IOException { - return StartRollupJobResponse.fromXContent(parser); - } - - @Override - protected boolean supportsUnknownFields() { - return false; + private StartRollupJobResponse createTestInstance() { + return new StartRollupJobResponse(randomBoolean()); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/StopRollupJobResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/StopRollupJobResponseTests.java index 0e5dc7894506c..bf66259b23faf 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/StopRollupJobResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/StopRollupJobResponseTests.java @@ -18,34 +18,25 @@ */ package org.elasticsearch.client.rollup; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.test.AbstractXContentTestCase; -import org.junit.Before; +import org.elasticsearch.client.core.AcknowledgedResponseTests; +import org.elasticsearch.test.ESTestCase; import java.io.IOException; -public class StopRollupJobResponseTests extends 
AbstractXContentTestCase { +import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; - private boolean acknowledged; +public class StopRollupJobResponseTests extends ESTestCase { - @Before - public void setupAcknoledged() { - acknowledged = randomBoolean(); + public void testFromXContent() throws IOException { + xContentTester(this::createParser, + this::createTestInstance, + AcknowledgedResponseTests::toXContent, + StopRollupJobResponse::fromXContent) + .supportsUnknownFields(false) + .test(); } - - @Override - protected StopRollupJobResponse createTestInstance() { - return new StopRollupJobResponse(acknowledged); - } - - @Override - protected StopRollupJobResponse doParseInstance(XContentParser parser) throws IOException { - return StopRollupJobResponse.fromXContent(parser); - } - - @Override - protected boolean supportsUnknownFields() { - return false; + private StopRollupJobResponse createTestInstance() { + return new StopRollupJobResponse(randomBoolean()); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/HasPrivilegesRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/HasPrivilegesRequestTests.java new file mode 100644 index 0000000000000..5a888bd95e4ab --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/HasPrivilegesRequestTests.java @@ -0,0 +1,111 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.security; + +import org.elasticsearch.client.security.user.privileges.ApplicationResourcePrivileges; +import org.elasticsearch.client.security.user.privileges.IndicesPrivileges; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; +import org.elasticsearch.test.XContentTestUtils; +import org.hamcrest.Matchers; + +import java.io.IOException; +import java.util.Arrays; +import java.util.LinkedHashSet; +import java.util.Map; +import java.util.Set; + +public class HasPrivilegesRequestTests extends ESTestCase { + + public void testToXContent() throws IOException { + final HasPrivilegesRequest request = new HasPrivilegesRequest( + new LinkedHashSet<>(Arrays.asList("monitor", "manage_watcher", "manage_ml")), + new LinkedHashSet<>(Arrays.asList( + IndicesPrivileges.builder().indices("index-001", "index-002").privileges("all").build(), + IndicesPrivileges.builder().indices("index-003").privileges("read").build() + )), + new LinkedHashSet<>(Arrays.asList( + new ApplicationResourcePrivileges("myapp", Arrays.asList("read", "write"), Arrays.asList("*")), + new ApplicationResourcePrivileges("myapp", Arrays.asList("admin"), Arrays.asList("/data/*")) + )) + ); + String json = Strings.toString(request); + final Map parsed = XContentHelper.convertToMap(XContentType.JSON.xContent(), json, false); + + final Map expected = XContentHelper.convertToMap(XContentType.JSON.xContent(), "{" + + " \"cluster\":[\"monitor\",\"manage_watcher\",\"manage_ml\"]," + + " \"index\":[{" + + " \"names\":[\"index-001\",\"index-002\"]," + + " \"privileges\":[\"all\"]" + + " },{" + + " \"names\":[\"index-003\"]," + + " \"privileges\":[\"read\"]" + + " }]," + + " \"application\":[{" + + " \"application\":\"myapp\"," + + " \"privileges\":[\"read\",\"write\"]," + + " \"resources\":[\"*\"]" + + " },{" + + " \"application\":\"myapp\"," + + " \"privileges\":[\"admin\"]," + + " \"resources\":[\"/data/*\"]" + + " }]" + + "}", false); + + assertThat(XContentTestUtils.differenceBetweenMapsIgnoringArrayOrder(parsed, expected), Matchers.nullValue()); + } + + public void testEqualsAndHashCode() { + final Set cluster = Sets.newHashSet(randomArray(1, 3, String[]::new, () -> randomAlphaOfLengthBetween(3, 8))); + final Set indices = Sets.newHashSet(randomArray(1, 5, IndicesPrivileges[]::new, + () -> IndicesPrivileges.builder() + .indices(generateRandomStringArray(5, 12, false, false)) + .privileges(generateRandomStringArray(3, 8, false, false)) + .build())); + final Set application = Sets.newHashSet(randomArray(1, 5, ApplicationResourcePrivileges[]::new, + () -> new ApplicationResourcePrivileges( + randomAlphaOfLengthBetween(5, 12), + Sets.newHashSet(generateRandomStringArray(3, 8, false, false)), + Sets.newHashSet(generateRandomStringArray(2, 6, false, false)) + ))); + final HasPrivilegesRequest request = new HasPrivilegesRequest(cluster, indices, application); + EqualsHashCodeTestUtils.checkEqualsAndHashCode(request, this::copy, this::mutate); + } + + private HasPrivilegesRequest copy(HasPrivilegesRequest request) { + return new HasPrivilegesRequest(request.getClusterPrivileges(), request.getIndexPrivileges(), request.getApplicationPrivileges()); + } + + private HasPrivilegesRequest mutate(HasPrivilegesRequest request) { + switch (randomIntBetween(1, 3)) { + case 
1: + return new HasPrivilegesRequest(null, request.getIndexPrivileges(), request.getApplicationPrivileges()); + case 2: + return new HasPrivilegesRequest(request.getClusterPrivileges(), null, request.getApplicationPrivileges()); + case 3: + return new HasPrivilegesRequest(request.getClusterPrivileges(), request.getIndexPrivileges(), null); + } + throw new IllegalStateException("The universe is broken (or the RNG is)"); + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/HasPrivilegesResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/HasPrivilegesResponseTests.java new file mode 100644 index 0000000000000..2fb542f4314d7 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/HasPrivilegesResponseTests.java @@ -0,0 +1,262 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.security; + +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; +import org.hamcrest.Matchers; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; + +import static java.util.Collections.emptyMap; + +public class HasPrivilegesResponseTests extends ESTestCase { + + public void testParseValidResponse() throws IOException { + String json = "{" + + " \"username\": \"namor\"," + + " \"has_all_requested\": false," + + " \"cluster\" : {" + + " \"manage\" : false," + + " \"monitor\" : true" + + " }," + + " \"index\" : {" + + " \"index-01\": {" + + " \"read\" : true," + + " \"write\" : false" + + " }," + + " \"index-02\": {" + + " \"read\" : true," + + " \"write\" : true" + + " }," + + " \"index-03\": {" + + " \"read\" : false," + + " \"write\" : false" + + " }" + + " }," + + " \"application\" : {" + + " \"app01\" : {" + + " \"/object/1\" : {" + + " \"read\" : true," + + " \"write\" : false" + + " }," + + " \"/object/2\" : {" + + " \"read\" : true," + + " \"write\" : true" + + " }" + + " }," + + " \"app02\" : {" + + " \"/object/1\" : {" + + " \"read\" : false," + + " \"write\" : false" + + " }," + + " \"/object/3\" : {" + + " \"read\" : false," + + " \"write\" : true" + + " }" + + " }" + + " }" + + "}"; + final XContentParser parser = createParser(XContentType.JSON.xContent(), json); + HasPrivilegesResponse response = HasPrivilegesResponse.fromXContent(parser); + + assertThat(response.getUsername(), Matchers.equalTo("namor")); + assertThat(response.hasAllRequested(), Matchers.equalTo(false)); + + assertThat(response.getClusterPrivileges().keySet(), 
Matchers.containsInAnyOrder("monitor", "manage")); + assertThat(response.hasClusterPrivilege("monitor"), Matchers.equalTo(true)); + assertThat(response.hasClusterPrivilege("manage"), Matchers.equalTo(false)); + + assertThat(response.getIndexPrivileges().keySet(), Matchers.containsInAnyOrder("index-01", "index-02", "index-03")); + assertThat(response.hasIndexPrivilege("index-01", "read"), Matchers.equalTo(true)); + assertThat(response.hasIndexPrivilege("index-01", "write"), Matchers.equalTo(false)); + assertThat(response.hasIndexPrivilege("index-02", "read"), Matchers.equalTo(true)); + assertThat(response.hasIndexPrivilege("index-02", "write"), Matchers.equalTo(true)); + assertThat(response.hasIndexPrivilege("index-03", "read"), Matchers.equalTo(false)); + assertThat(response.hasIndexPrivilege("index-03", "write"), Matchers.equalTo(false)); + + assertThat(response.getApplicationPrivileges().keySet(), Matchers.containsInAnyOrder("app01", "app02")); + assertThat(response.hasApplicationPrivilege("app01", "/object/1", "read"), Matchers.equalTo(true)); + assertThat(response.hasApplicationPrivilege("app01", "/object/1", "write"), Matchers.equalTo(false)); + assertThat(response.hasApplicationPrivilege("app01", "/object/2", "read"), Matchers.equalTo(true)); + assertThat(response.hasApplicationPrivilege("app01", "/object/2", "write"), Matchers.equalTo(true)); + assertThat(response.hasApplicationPrivilege("app02", "/object/1", "read"), Matchers.equalTo(false)); + assertThat(response.hasApplicationPrivilege("app02", "/object/1", "write"), Matchers.equalTo(false)); + assertThat(response.hasApplicationPrivilege("app02", "/object/3", "read"), Matchers.equalTo(false)); + assertThat(response.hasApplicationPrivilege("app02", "/object/3", "write"), Matchers.equalTo(true)); + } + + public void testHasClusterPrivilege() { + final Map cluster = MapBuilder.newMapBuilder() + .put("a", true) + .put("b", false) + .put("c", false) + .put("d", true) + .map(); + final HasPrivilegesResponse response = new HasPrivilegesResponse("x", false, cluster, emptyMap(), emptyMap()); + assertThat(response.hasClusterPrivilege("a"), Matchers.is(true)); + assertThat(response.hasClusterPrivilege("b"), Matchers.is(false)); + assertThat(response.hasClusterPrivilege("c"), Matchers.is(false)); + assertThat(response.hasClusterPrivilege("d"), Matchers.is(true)); + + final IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> response.hasClusterPrivilege("e")); + assertThat(iae.getMessage(), Matchers.containsString("[e]")); + assertThat(iae.getMessage().toLowerCase(Locale.ROOT), Matchers.containsString("cluster privilege")); + } + + public void testHasIndexPrivilege() { + final Map> index = MapBuilder.>newMapBuilder() + .put("i1", Collections.singletonMap("read", true)) + .put("i2", Collections.singletonMap("read", false)) + .put("i3", MapBuilder.newMapBuilder().put("read", true).put("write", true).map()) + .put("i4", MapBuilder.newMapBuilder().put("read", true).put("write", false).map()) + .put("i*", MapBuilder.newMapBuilder().put("read", false).put("write", false).map()) + .map(); + final HasPrivilegesResponse response = new HasPrivilegesResponse("x", false, emptyMap(), index, emptyMap()); + assertThat(response.hasIndexPrivilege("i1", "read"), Matchers.is(true)); + assertThat(response.hasIndexPrivilege("i2", "read"), Matchers.is(false)); + assertThat(response.hasIndexPrivilege("i3", "read"), Matchers.is(true)); + assertThat(response.hasIndexPrivilege("i3", "write"), Matchers.is(true)); + 
assertThat(response.hasIndexPrivilege("i4", "read"), Matchers.is(true)); + assertThat(response.hasIndexPrivilege("i4", "write"), Matchers.is(false)); + assertThat(response.hasIndexPrivilege("i*", "read"), Matchers.is(false)); + assertThat(response.hasIndexPrivilege("i*", "write"), Matchers.is(false)); + + final IllegalArgumentException iae1 = expectThrows(IllegalArgumentException.class, () -> response.hasIndexPrivilege("i0", "read")); + assertThat(iae1.getMessage(), Matchers.containsString("index [i0]")); + + final IllegalArgumentException iae2 = expectThrows(IllegalArgumentException.class, () -> response.hasIndexPrivilege("i1", "write")); + assertThat(iae2.getMessage().toLowerCase(Locale.ROOT), Matchers.containsString("privilege [write]")); + assertThat(iae2.getMessage(), Matchers.containsString("index [i1]")); + } + + public void testHasApplicationPrivilege() { + final Map> app1 = MapBuilder.>newMapBuilder() + .put("/data/1", Collections.singletonMap("read", true)) + .put("/data/2", Collections.singletonMap("read", false)) + .put("/data/3", MapBuilder.newMapBuilder().put("read", true).put("write", true).map()) + .put("/data/4", MapBuilder.newMapBuilder().put("read", true).put("write", false).map()) + .map(); + final Map> app2 = MapBuilder.>newMapBuilder() + .put("/action/1", Collections.singletonMap("execute", true)) + .put("/action/*", Collections.singletonMap("execute", false)) + .map(); + Map>> appPrivileges = new HashMap<>(); + appPrivileges.put("a1", app1); + appPrivileges.put("a2", app2); + final HasPrivilegesResponse response = new HasPrivilegesResponse("x", false, emptyMap(), emptyMap(), appPrivileges); + assertThat(response.hasApplicationPrivilege("a1", "/data/1", "read"), Matchers.is(true)); + assertThat(response.hasApplicationPrivilege("a1", "/data/2", "read"), Matchers.is(false)); + assertThat(response.hasApplicationPrivilege("a1", "/data/3", "read"), Matchers.is(true)); + assertThat(response.hasApplicationPrivilege("a1", "/data/3", "write"), Matchers.is(true)); + assertThat(response.hasApplicationPrivilege("a1", "/data/4", "read"), Matchers.is(true)); + assertThat(response.hasApplicationPrivilege("a1", "/data/4", "write"), Matchers.is(false)); + assertThat(response.hasApplicationPrivilege("a2", "/action/1", "execute"), Matchers.is(true)); + assertThat(response.hasApplicationPrivilege("a2", "/action/*", "execute"), Matchers.is(false)); + + final IllegalArgumentException iae1 = expectThrows(IllegalArgumentException.class, + () -> response.hasApplicationPrivilege("a0", "/data/1", "read")); + assertThat(iae1.getMessage().toLowerCase(Locale.ROOT), Matchers.containsString("application [a0]")); + + final IllegalArgumentException iae2 = expectThrows(IllegalArgumentException.class, + () -> response.hasApplicationPrivilege("a1", "/data/0", "read")); + assertThat(iae2.getMessage().toLowerCase(Locale.ROOT), Matchers.containsString("application [a1]")); + assertThat(iae2.getMessage().toLowerCase(Locale.ROOT), Matchers.containsString("resource [/data/0]")); + + final IllegalArgumentException iae3 = expectThrows(IllegalArgumentException.class, + () -> response.hasApplicationPrivilege("a1", "/action/1", "execute")); + assertThat(iae3.getMessage().toLowerCase(Locale.ROOT), Matchers.containsString("application [a1]")); + assertThat(iae3.getMessage().toLowerCase(Locale.ROOT), Matchers.containsString("resource [/action/1]")); + + final IllegalArgumentException iae4 = expectThrows(IllegalArgumentException.class, + () -> response.hasApplicationPrivilege("a1", "/data/1", "write")); + 
assertThat(iae4.getMessage().toLowerCase(Locale.ROOT), Matchers.containsString("application [a1]")); + assertThat(iae4.getMessage().toLowerCase(Locale.ROOT), Matchers.containsString("resource [/data/1]")); + assertThat(iae4.getMessage().toLowerCase(Locale.ROOT), Matchers.containsString("privilege [write]")); + } + + public void testEqualsAndHashCode() { + final HasPrivilegesResponse response = randomResponse(); + EqualsHashCodeTestUtils.checkEqualsAndHashCode(response, this::copy, this::mutate); + } + + private HasPrivilegesResponse copy(HasPrivilegesResponse response) { + return new HasPrivilegesResponse(response.getUsername(), + response.hasAllRequested(), + response.getClusterPrivileges(), + response.getIndexPrivileges(), + response.getApplicationPrivileges()); + } + + private HasPrivilegesResponse mutate(HasPrivilegesResponse request) { + switch (randomIntBetween(1, 5)) { + case 1: + return new HasPrivilegesResponse("_" + request.getUsername(), request.hasAllRequested(), + request.getClusterPrivileges(), request.getIndexPrivileges(), request.getApplicationPrivileges()); + case 2: + return new HasPrivilegesResponse(request.getUsername(), request.hasAllRequested() == false, + request.getClusterPrivileges(), request.getIndexPrivileges(), request.getApplicationPrivileges()); + case 3: + return new HasPrivilegesResponse(request.getUsername(), request.hasAllRequested(), + emptyMap(), request.getIndexPrivileges(), request.getApplicationPrivileges()); + case 4: + return new HasPrivilegesResponse(request.getUsername(), request.hasAllRequested(), + request.getClusterPrivileges(), emptyMap(), request.getApplicationPrivileges()); + case 5: + return new HasPrivilegesResponse(request.getUsername(), request.hasAllRequested(), + request.getClusterPrivileges(), request.getIndexPrivileges(), emptyMap()); + } + throw new IllegalStateException("The universe is broken (or the RNG is)"); + } + + private HasPrivilegesResponse randomResponse() { + final Map cluster = randomPrivilegeMap(); + final Map> index = randomResourceMap(); + + final Map>> application = new HashMap<>(); + for (String app : randomArray(1, 3, String[]::new, () -> randomAlphaOfLengthBetween(3, 6).toLowerCase(Locale.ROOT))) { + application.put(app, randomResourceMap()); + } + return new HasPrivilegesResponse(randomAlphaOfLengthBetween(3, 8), randomBoolean(), cluster, index, application); + } + + private Map> randomResourceMap() { + final Map> resource = new HashMap<>(); + for (String res : randomArray(1, 3, String[]::new, () -> randomAlphaOfLengthBetween(5, 8))) { + resource.put(res, randomPrivilegeMap()); + } + return resource; + } + + private Map randomPrivilegeMap() { + final Map map = new HashMap<>(); + for (String privilege : randomArray(1, 6, String[]::new, () -> randomAlphaOfLengthBetween(3, 12))) { + map.put(privilege, randomBoolean()); + } + return map; + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/xpack/XPackInfoResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/xpack/XPackInfoResponseTests.java deleted file mode 100644 index 702c4bef64bd2..0000000000000 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/xpack/XPackInfoResponseTests.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.client.xpack; - -import org.elasticsearch.client.license.LicenseStatus; -import org.elasticsearch.client.xpack.XPackInfoResponse.BuildInfo; -import org.elasticsearch.client.xpack.XPackInfoResponse.FeatureSetsInfo; -import org.elasticsearch.client.xpack.XPackInfoResponse.FeatureSetsInfo.FeatureSet; -import org.elasticsearch.client.xpack.XPackInfoResponse.LicenseInfo; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.test.AbstractXContentTestCase; - -import java.io.IOException; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; -import java.util.function.Predicate; - -public class XPackInfoResponseTests extends AbstractXContentTestCase { - - @Override - protected boolean supportsUnknownFields() { - return true; - } - - protected XPackInfoResponse doParseInstance(XContentParser parser) throws IOException { - return XPackInfoResponse.fromXContent(parser); - } - - protected Predicate getRandomFieldsExcludeFilter() { - return path -> path.equals("features") - || (path.startsWith("features") && path.endsWith("native_code_info")); - } - - protected ToXContent.Params getToXContentParams() { - Map params = new HashMap<>(); - if (randomBoolean()) { - params.put("human", randomBoolean() ? "true" : "false"); - } - if (randomBoolean()) { - params.put("categories", "_none"); - } - return new ToXContent.MapParams(params); - } - - protected XPackInfoResponse createTestInstance() { - return new XPackInfoResponse( - randomBoolean() ? null : randomBuildInfo(), - randomBoolean() ? null : randomLicenseInfo(), - randomBoolean() ? null : randomFeatureSetsInfo()); - } - - private BuildInfo randomBuildInfo() { - return new BuildInfo( - randomAlphaOfLength(10), - randomAlphaOfLength(15)); - } - - private LicenseInfo randomLicenseInfo() { - return new LicenseInfo( - randomAlphaOfLength(10), - randomAlphaOfLength(4), - randomAlphaOfLength(5), - randomFrom(LicenseStatus.values()), - randomLong()); - } - - private FeatureSetsInfo randomFeatureSetsInfo() { - int size = between(0, 10); - Set featureSets = new HashSet<>(size); - while (featureSets.size() < size) { - featureSets.add(randomFeatureSet()); - } - return new FeatureSetsInfo(featureSets); - } - - private FeatureSet randomFeatureSet() { - return new FeatureSet( - randomAlphaOfLength(5), - randomBoolean() ? 
null : randomAlphaOfLength(20), - randomBoolean(), - randomBoolean(), - randomNativeCodeInfo()); - } - - private Map randomNativeCodeInfo() { - if (randomBoolean()) { - return null; - } - int size = between(0, 10); - Map nativeCodeInfo = new HashMap<>(size); - while (nativeCodeInfo.size() < size) { - nativeCodeInfo.put(randomAlphaOfLength(5), randomAlphaOfLength(5)); - } - return nativeCodeInfo; - } -} diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc index 7f7636eb85998..72d77d969f231 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -11,7 +11,7 @@ release-state can be: released | prerelease | unreleased ////////// -:release-state: unreleased +:release-state: prerelease :issue: https://github.com/elastic/elasticsearch/issues/ :pull: https://github.com/elastic/elasticsearch/pull/ diff --git a/docs/java-rest/high-level/document/bulk.asciidoc b/docs/java-rest/high-level/document/bulk.asciidoc index d794779435af3..c50a1f790583b 100644 --- a/docs/java-rest/high-level/document/bulk.asciidoc +++ b/docs/java-rest/high-level/document/bulk.asciidoc @@ -37,9 +37,9 @@ And different operation types can be added to the same +{request}+: -------------------------------------------------- include-tagged::{doc-tests-file}[{api}-request-with-mixed-operations] -------------------------------------------------- -<1> Adds a `DeleteRequest` to the `BulkRequest`. See <<{upid}-delete>> +<1> Adds a `DeleteRequest` to the +{request}+. See <<{upid}-delete>> for more information on how to build `DeleteRequest`. -<2> Adds an `UpdateRequest` to the `BulkRequest`. See <<{upid}-update>> +<2> Adds an `UpdateRequest` to the +{request}+. See <<{upid}-update>> for more information on how to build `UpdateRequest`. <3> Adds an `IndexRequest` using the SMILE format @@ -72,22 +72,22 @@ the index/update/delete operations. ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[bulk-request-pipeline] +include-tagged::{doc-tests-file}[{api}-request-pipeline] -------------------------------------------------- <1> Global pipelineId used on all sub requests, unless overridden on a sub request ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[bulk-request-routing] +include-tagged::{doc-tests-file}[{api}-request-routing] -------------------------------------------------- <1> Global routingId used on all sub requests, unless overridden on a sub request ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[bulk-request-index-type] +include-tagged::{doc-tests-file}[{api}-request-index-type] -------------------------------------------------- <1> A bulk request with global index and type used on all sub requests, unless overridden on a sub request. -Both parameters are @Nullable and can only be set during BulkRequest creation. +Both parameters are @Nullable and can only be set during +{request}+ creation. 
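+
+As an illustration only (this sketch is not part of the tagged test sources; the
+two-argument constructor and the global `pipeline`/`routing` setters are assumed
+from the snippets above), a request combining these global parameters might look like:
+
+["source","java"]
+--------------------------------------------------
+// hypothetical sketch: global index/type via the constructor, plus a global
+// pipeline and routing applied to every sub request unless overridden
+BulkRequest request = new BulkRequest("posts", "doc");
+request.pipeline("my-pipeline");
+request.routing("user-1");
+// this IndexRequest sets no index or type of its own, so it inherits the globals
+request.add(new IndexRequest().id("1")
+        .source(XContentType.JSON, "field", "value"));
+--------------------------------------------------
+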
include::../execution.asciidoc[]
@@ -167,7 +167,7 @@ actions currently added (defaults to 1000, use -1 to disable it)
actions currently added (defaults to 5Mb, use -1 to disable it)
<3> Set the number of concurrent requests allowed to be executed (defaults to 1,
use 0 to only allow the execution of a single request)
-<4> Set a flush interval flushing any `BulkRequest` pending if the
+<4> Set a flush interval flushing any +{request}+ pending if the
interval passes (defaults to not set)
<5> Set a constant back off policy that initially waits for 1 second
and retries up to 3 times. See `BackoffPolicy.noBackoff()`,
diff --git a/docs/java-rest/high-level/document/reindex.asciidoc b/docs/java-rest/high-level/document/reindex.asciidoc
index 2482467410c96..7d8876aa1269a 100644
--- a/docs/java-rest/high-level/document/reindex.asciidoc
+++ b/docs/java-rest/high-level/document/reindex.asciidoc
@@ -10,7 +10,7 @@
[id="{upid}-{api}-request"]
==== Reindex Request
-A +{request} can be used to copy documents from one or more indexes into a
+A +{request}+ can be used to copy documents from one or more indexes into a
destination index. It requires an existing source index and a target index
which may or may not exist pre-request. Reindex does not attempt
@@ -100,7 +100,7 @@ include-tagged::{doc-tests-file}[{api}-request-sort]
<1> add descending sort to `field1`
<2> add ascending sort to `field2`
-+{request} also supports a `script` that modifies the document. It allows you to
++{request}+ also supports a `script` that modifies the document. It allows you to
also change the document's metadata. The following example illustrates that.
["source","java",subs="attributes,callouts,macros"]
@@ -157,6 +157,19 @@ include-tagged::{doc-tests-file}[{api}-request-refresh]
include::../execution.asciidoc[]
+[id="{upid}-{api}-task-submission"]
+==== Reindex Task Submission
+It is also possible to submit a +{request}+ without waiting for its completion by using the Task API. This is
+equivalent to a REST request with the `wait_for_completion` flag set to false.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{hlrc-tests}/ReindexIT.java[submit-reindex-task]
+--------------------------------------------------
+<1> A +{request}+ is constructed the same way as for the synchronous method
+<2> The submit method returns a `TaskSubmissionResponse`, which contains a task identifier.
+<3> The task identifier can be used to get the `response` from a completed task.
+
[id="{upid}-{api}-response"]
==== Reindex Response
diff --git a/docs/java-rest/high-level/ilm/get_lifecycle_policy.asciidoc b/docs/java-rest/high-level/ilm/get_lifecycle_policy.asciidoc
new file mode 100644
index 0000000000000..b86fad5880f67
--- /dev/null
+++ b/docs/java-rest/high-level/ilm/get_lifecycle_policy.asciidoc
@@ -0,0 +1,40 @@
+--
+:api: ilm-get-lifecycle-policy
+:request: GetLifecyclePolicyRequest
+:response: GetLifecyclePolicyResponse
+--
+
+[id="{upid}-{api}"]
+=== Get Lifecycle Policy API
+
+
+[id="{upid}-{api}-request"]
+==== Request
+
+The Get Lifecycle Policy API allows you to retrieve the definition of an Index
+Lifecycle Management policy from the cluster.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests-file}[{api}-request]
+--------------------------------------------------
+<1> Gets all policies.
+<2> Gets `my_policy` and `other_policy`.
+
+[id="{upid}-{api}-response"]
+==== Response
+
+The returned +{response}+ contains a map of `LifecyclePolicyMetadata`,
+keyed by policy name. Each entry holds metadata about the policy
+as well as the policy definition itself.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests-file}[{api}-response]
+--------------------------------------------------
+<1> The returned policies are accessed by name.
+<2> The policy definition itself.
+
+include::../execution.asciidoc[]
+
+
diff --git a/docs/java-rest/high-level/index.asciidoc b/docs/java-rest/high-level/index.asciidoc
index a15967e9ad717..2010c9c539a54 100644
--- a/docs/java-rest/high-level/index.asciidoc
+++ b/docs/java-rest/high-level/index.asciidoc
@@ -25,6 +25,7 @@ the same response objects.
--
:doc-tests: {docdir}/../../client/rest-high-level/src/test/java/org/elasticsearch/client/documentation
+:hlrc-tests: {docdir}/../../client/rest-high-level/src/test/java/org/elasticsearch/client
include::getting-started.asciidoc[]
include::supported-apis.asciidoc[]
diff --git a/docs/java-rest/high-level/migration/upgrade.asciidoc b/docs/java-rest/high-level/migration/upgrade.asciidoc
index 76eae0652d9bf..7497b74d38391 100644
--- a/docs/java-rest/high-level/migration/upgrade.asciidoc
+++ b/docs/java-rest/high-level/migration/upgrade.asciidoc
@@ -1,14 +1,22 @@
+--
+:api: upgrade
+:request: IndexUpgradeRequest
+:response: BulkByScrollResponse
+:submit_response: IndexUpgradeSubmissionResponse
+:doc-tests-file: {doc-tests}/MigrationClientDocumentationIT.java
+--
+
[[java-rest-high-migration-upgrade]]
=== Migration Upgrade
[[java-rest-high-migraton-upgrade-request]]
==== Index Upgrade Request
-An `IndexUpgradeRequest` requires an index argument. Only one index at the time should be upgraded:
+An +{request}+ requires an index argument.
Only one index at a time can be upgraded:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/MigrationClientDocumentationIT.java[upgrade-request]
+include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> Create a new request instance
@@ -17,39 +25,37 @@ include-tagged::{doc-tests}/MigrationClientDocumentationIT.java[upgrade-request]

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/MigrationClientDocumentationIT.java[upgrade-execute]
+include-tagged::{doc-tests-file}[{api}-execute]
--------------------------------------------------

[[java-rest-high-migration-upgrade-response]]
==== Response

-The returned `BulkByScrollResponse` contains information about the executed operation
+The returned +{response}+ contains information about the executed operation

[[java-rest-high-migraton-async-upgrade-request]]
==== Asynchronous Execution

-The asynchronous execution of a upgrade request requires both the `IndexUpgradeRequest`
+The asynchronous execution of an upgrade request requires both the +{request}+
instance and an `ActionListener` instance to be passed to the asynchronous method:

-A typical listener for `BulkResponse` looks like:
-
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/MigrationClientDocumentationIT.java[upgrade-async-listener]
+include-tagged::{doc-tests-file}[{api}-async-listener]
--------------------------------------------------
<1> Called when the execution is successfully completed. The response is provided as an argument and contains a list of individual results for each operation that was executed. Note that one or more operations might have failed while the others have been successfully executed.
-<2> Called when the whole `IndexUpgradeRequest` fails. In this case the raised
+<2> Called when the whole +{request}+ fails. In this case the raised
exception is provided as an argument and no operation has been executed.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/MigrationClientDocumentationIT.java[upgrade-async-execute]
+include-tagged::{doc-tests-file}[{api}-async-execute]
--------------------------------------------------
-<1> The `IndexUpgradeRequest` to execute and the `ActionListener` to use when
+<1> The +{request}+ to execute and the `ActionListener` to use when
the execution completes

The asynchronous method does not block and returns immediately. Once it is
@@ -59,11 +65,11 @@ it failed.

=== Migration Upgrade with Task API

-Submission of upgrade request task will requires the `IndexUpgradeRequest` and will return
-`IndexUpgradeSubmissionResponse`. The `IndexUpgradeSubmissionResponse` can later be use to fetch
+Submitting an upgrade task requires the +{request}+ and will return
++{submit_response}+. The +{submit_response}+ can later be used to fetch
TaskId and query the Task API for results.
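+Before the Java example that follows, it may help to see the REST-level step this wraps: polling the Task API with the returned task id. A minimal console sketch; the task id shown here is a made-up placeholder, so substitute the one returned by the submission:
+
+[source,js]
+--------------------------------------------------
+GET _tasks/oTUltX4IQMOUUVeiohTt8A:12345
+--------------------------------------------------
+// CONSOLE
+// TEST[skip: the task id above is a hypothetical placeholder]
+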
["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/MigrationClientDocumentationIT.java[upgrade-task-api] +include-tagged::{doc-tests-file}[{api}-task-api] -------------------------------------------------- diff --git a/docs/java-rest/high-level/ml/delete-model-snapshot.asciidoc b/docs/java-rest/high-level/ml/delete-model-snapshot.asciidoc new file mode 100644 index 0000000000000..6ede01901dabe --- /dev/null +++ b/docs/java-rest/high-level/ml/delete-model-snapshot.asciidoc @@ -0,0 +1,30 @@ +-- +:api: delete-model-snapshot +:request: DeleteModelSnapshotRequest +:response: AcknowledgedResponse +-- +[id="{upid}-{api}"] +=== Delete Model Snapshot API + +[id="{upid}-{api}-request"] +==== Delete Model Snapshot Request + +A +{request}+ object requires both a non-null `jobId` and a non-null `snapshotId`. + +["source","java",subs="attributes,callouts,macros"] +--------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +--------------------------------------------------- +<1> Constructing a new request referencing existing `jobId` and `snapshotId`. + +include::../execution.asciidoc[] + +[id="{upid}-{api}-response"] +==== Delete Model Snapshot Response + +The returned +{response}+ object indicates the acknowledgement of the request: +["source","java",subs="attributes,callouts,macros"] +--------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +--------------------------------------------------- +<1> `isAcknowledged` was the deletion request acknowledged or not diff --git a/docs/java-rest/high-level/ml/put-datafeed.asciidoc b/docs/java-rest/high-level/ml/put-datafeed.asciidoc index e9f66f0b61d76..ed8a089c7bed8 100644 --- a/docs/java-rest/high-level/ml/put-datafeed.asciidoc +++ b/docs/java-rest/high-level/ml/put-datafeed.asciidoc @@ -63,6 +63,17 @@ include-tagged::{doc-tests-file}[{api}-config-set-query-delay] -------------------------------------------------- <1> The time interval behind real time that data is queried. +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-config-set-delayed-data-check-config] +-------------------------------------------------- +<1> Sets the delayed data check configuration. +The window must be larger than the Job's bucket size, but smaller than 24 hours, +and span less than 10,000 buckets. +Defaults to `null`, which causes an appropriate window span to be calculated when +the datafeed runs. +To explicitly disable, pass `DelayedDataCheckConfig.disabledDelayedDataCheckConfig()`. + ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- include-tagged::{doc-tests-file}[{api}-config-set-script-fields] diff --git a/docs/java-rest/high-level/ml/update-filter.asciidoc b/docs/java-rest/high-level/ml/update-filter.asciidoc new file mode 100644 index 0000000000000..b100000ddc1e8 --- /dev/null +++ b/docs/java-rest/high-level/ml/update-filter.asciidoc @@ -0,0 +1,57 @@ +-- +:api: update-filter +:request: UpdateFilterRequest +:response: PutFilterResponse +-- +[id="{upid}-{api}"] +=== Update Filter API + +The Update Filter API can be used to update an existing {ml} filter +in the cluster. The API accepts a +{request}+ object +as a request and returns a +{response}+. 
+ +[id="{upid}-{api}-request"] +==== Update Filter Request + +A +{request}+ requires the following argument: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- +<1> The id of the existing {ml} filter + +==== Optional Arguments +The following arguments are optional: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-description] +-------------------------------------------------- +<1> The updated description of the {ml} filter + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-add-items] +-------------------------------------------------- +<1> The list of items to add to the {ml} filter + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-remove-items] +-------------------------------------------------- +<1> The list of items to remove from the {ml} filter + +include::../execution.asciidoc[] + +[id="{upid}-{api}-response"] +==== Response + +The returned +{response}+ returns the full representation of +the updated {ml} filter if it has been successfully updated. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> The updated `MlFilter` diff --git a/docs/java-rest/high-level/rollup/stop_job.asciidoc b/docs/java-rest/high-level/rollup/stop_job.asciidoc index 41ec965bb7c28..cba1dcdd2d374 100644 --- a/docs/java-rest/high-level/rollup/stop_job.asciidoc +++ b/docs/java-rest/high-level/rollup/stop_job.asciidoc @@ -17,6 +17,11 @@ The Stop Rollup Job API allows you to stop a job by ID. include-tagged::{doc-tests-file}[{api}-request] -------------------------------------------------- <1> The ID of the job to stop. +<2> Whether the request should wait that the stop operation has completed +before returning (optional, defaults to `false`) +<3> If `wait_for_completion=true`, this parameter controls how long to wait +before giving up and throwing an error (optional, defaults to 30 seconds). 
+ [id="{upid}-{api}-response"] ==== Response diff --git a/docs/java-rest/high-level/search/count.asciidoc b/docs/java-rest/high-level/search/count.asciidoc index f70e1e1fd4d22..2796d34ab36a0 100644 --- a/docs/java-rest/high-level/search/count.asciidoc +++ b/docs/java-rest/high-level/search/count.asciidoc @@ -29,34 +29,16 @@ include-tagged::{doc-tests-file}[{api}-request-basic] [[java-rest-high-count-request-optional]] ===== Count Request optional arguments -Let's first look at some of the optional arguments of a +{request}+: +A +{request}+ also takes the following optional arguments: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests-file}[{api}-request-indices-types] +include-tagged::{doc-tests-file}[{api}-request-args] -------------------------------------------------- <1> Restricts the request to an index -<2> Limits the request to a type - -There are a couple of other interesting optional parameters: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests-file}[{api}-request-routing] --------------------------------------------------- -<1> Set a routing parameter - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests-file}[{api}-request-indicesOptions] --------------------------------------------------- -<1> Setting `IndicesOptions` controls how unavailable indices are resolved and how wildcard expressions are expanded - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests-file}[{api}-request-preference] --------------------------------------------------- -<1> Use the preference parameter e.g. to execute the search to prefer local shards. The default is to randomize across shards. +<2> Set a routing parameter +<3> Setting `IndicesOptions` controls how unavailable indices are resolved and how wildcard expressions are expanded +<4> Use the preference parameter e.g. to execute the search to prefer local shards. The default is to randomize across shards. 
===== Using the SearchSourceBuilder in CountRequest

diff --git a/docs/java-rest/high-level/search/search.asciidoc b/docs/java-rest/high-level/search/search.asciidoc
index 81e680b92506b..ab27a432fe5bc 100644
--- a/docs/java-rest/high-level/search/search.asciidoc
+++ b/docs/java-rest/high-level/search/search.asciidoc
@@ -289,8 +289,8 @@ be iterated over:
include-tagged::{doc-tests-file}[{api}-hits-singleHit]
--------------------------------------------------

-The `SearchHit` provides access to basic information like index, type, docId and
-score of each search hit:
+The `SearchHit` provides access to basic information like index, document ID
+and score of each search hit:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
diff --git a/docs/java-rest/high-level/security/has-privileges.asciidoc b/docs/java-rest/high-level/security/has-privileges.asciidoc
new file mode 100644
index 0000000000000..181b1b7f48167
--- /dev/null
+++ b/docs/java-rest/high-level/security/has-privileges.asciidoc
@@ -0,0 +1,86 @@
+--
+:api: has-privileges
+:request: HasPrivilegesRequest
+:response: HasPrivilegesResponse
+--
+
+[id="{upid}-{api}"]
+=== Has Privileges API
+
+[id="{upid}-{api}-request"]
+==== Has Privileges Request
+The +{request}+ supports checking for any or all of the following privilege types:
+
+* Cluster Privileges
+* Index Privileges
+* Application Privileges
+
+Privilege types that you do not wish to check may be passed in as +null+, but at least
+one privilege must be specified.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests-file}[{api}-request]
+--------------------------------------------------
+include::../execution.asciidoc[]
+
+[id="{upid}-{api}-response"]
+==== Has Privileges Response
+
+The returned +{response}+ contains the following properties
+
+`username`::
+The username (userid) of the current user (for whom the "has privileges"
+check was executed)
+
+`hasAllRequested`::
+`true` if the user has all of the privileges that were specified in the
++{request}+. Otherwise `false`.
+
+`clusterPrivileges`::
+A `Map<String, Boolean>` where each key is the name of one of the cluster
+privileges specified in the request, and the value is `true` if the user
+has that privilege, and `false` otherwise.
++
+The method `hasClusterPrivilege` can be used to retrieve this information
+in a more fluent manner. This method throws an `IllegalArgumentException`
+if the privilege was not included in the response (which will be the case
+if the privilege was not part of the request).
+
+`indexPrivileges`::
+A `Map<String, Map<String, Boolean>>` where each key is the name of an
+index (as specified in the +{request}+) and the value is a `Map` from
+privilege name to a `Boolean`. The `Boolean` value is `true` if the user
+has that privilege on that index, and `false` otherwise.
++
+The method `hasIndexPrivilege` can be used to retrieve this information
+in a more fluent manner. This method throws an `IllegalArgumentException`
+if the privilege was not included in the response (which will be the case
+if the privilege was not part of the request).
+
+`applicationPrivileges`::
+A `Map<String, Map<String, Map<String, Boolean>>>` where each key is the
+name of an application (as specified in the +{request}+).
+For each application, the value is a `Map` keyed by resource name, with
+each value being another `Map` from privilege name to a `Boolean`.
+The `Boolean` value is `true` if the user has that privilege on that +resource for that application, and `false` otherwise. ++ +The method `hasApplicationPrivilege` can be used to retrieve this +information in a more fluent manner. This method throws an +`IllegalArgumentException` if the privilege was not included in the +response (which will be the case if the privilege was not part of the +request). + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> `hasMonitor` will be `true` if the user has the `"monitor"` + cluster privilege. +<2> `hasWrite` will be `true` if the user has the `"write"` + privilege on the `"logstash-2018-10-05"` index. +<3> `hasRead` will be `true` if the user has the `"read"` + privilege on all possible indices that would match + the `"logstash-2018-*"` pattern. + diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index c174ce1a3aeda..5fa05135cc035 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -267,6 +267,8 @@ The Java High Level REST Client supports the following Machine Learning APIs: * <<{upid}-put-filter>> * <<{upid}-get-model-snapshots>> * <<{upid}-get-filters>> +* <<{upid}-delete-model-snapshot>> +* <<{upid}-update-filter>> include::ml/put-job.asciidoc[] include::ml/get-job.asciidoc[] @@ -298,6 +300,8 @@ include::ml/delete-calendar.asciidoc[] include::ml/put-filter.asciidoc[] include::ml/get-model-snapshots.asciidoc[] include::ml/get-filters.asciidoc[] +include::ml/delete-model-snapshot.asciidoc[] +include::ml/update-filter.asciidoc[] == Migration APIs @@ -347,6 +351,7 @@ The Java High Level REST Client supports the following Security APIs: * <<{upid}-clear-roles-cache>> * <<{upid}-clear-realm-cache>> * <<{upid}-authenticate>> +* <<{upid}-has-privileges>> * <> * <> * <> @@ -364,6 +369,7 @@ include::security/delete-privileges.asciidoc[] include::security/clear-roles-cache.asciidoc[] include::security/clear-realm-cache.asciidoc[] include::security/authenticate.asciidoc[] +include::security/has-privileges.asciidoc[] include::security/get-certificates.asciidoc[] include::security/put-role-mapping.asciidoc[] include::security/get-role-mappings.asciidoc[] @@ -438,5 +444,8 @@ The Java High Level REST Client supports the following Index Lifecycle Management APIs: * <<{upid}-ilm-put-lifecycle-policy>> +* <<{upid}-ilm-get-lifecycle-policy>> include::ilm/put_lifecycle_policy.asciidoc[] +include::ilm/get_lifecycle_policy.asciidoc[] + diff --git a/docs/painless/painless-contexts/painless-filter-context.asciidoc b/docs/painless/painless-contexts/painless-filter-context.asciidoc index 96fddf13b500d..bf4741cfc02fc 100644 --- a/docs/painless/painless-contexts/painless-filter-context.asciidoc +++ b/docs/painless/painless-contexts/painless-filter-context.asciidoc @@ -23,4 +23,43 @@ query to include and exclude documents. *API* -The standard <> is available. \ No newline at end of file +The standard <> is available. + +*Example* + +To run this example, first follow the steps in +<>. + +This script finds all unsold documents that cost less than $18. + +[source,Painless] +---- +doc['sold'].value == false && doc['cost'].value < 18 +---- + +Defining cost as a script parameter enables the cost to be configured +in the script query request. 
For example, the following request finds
+all available theatre seats for evening performances that are under $18.
+
+[source,js]
+----
+GET evening/_search
+{
+  "query": {
+    "bool" : {
+      "filter" : {
+        "script" : {
+          "script" : {
+            "source" : "doc['sold'].value == false && doc['cost'].value < params.cost",
+            "params" : {
+              "cost" : 18
+            }
+          }
+        }
+      }
+    }
+  }
+}
+----
+// CONSOLE
+// TEST[skip: requires setup from other pages]
\ No newline at end of file
diff --git a/docs/painless/painless-contexts/painless-min-should-match-context.asciidoc b/docs/painless/painless-contexts/painless-min-should-match-context.asciidoc
index b2ffb63fd7aea..cd476481381a6 100644
--- a/docs/painless/painless-contexts/painless-min-should-match-context.asciidoc
+++ b/docs/painless/painless-contexts/painless-min-should-match-context.asciidoc
@@ -25,4 +25,52 @@ results.

*API*

-The standard <> is available. \ No newline at end of file
+The standard <> is available.
+
+*Example*
+
+To run this example, first follow the steps in
+<>.
+
+Imagine that you want to find seats to performances by your favorite
+actors. You have a list of favorite actors in mind, and you want
+to find performances where the cast includes at least a certain
+number of them. A `terms_set` query with `minimum_should_match_script`
+is a way to accomplish this. To make the query request more configurable,
+you can define `min_actors_to_see` as a script parameter.
+
+To ensure that the parameter `min_actors_to_see` doesn't exceed
+the number of favorite actors, you can use `num_terms` to get
+the number of actors in the list and `Math.min` to get the lesser
+of the two.
+
+[source,Painless]
+----
+Math.min(params['num_terms'], params['min_actors_to_see'])
+----
+
+The following request finds seats to performances with at least
+two of the three specified actors.
+
+[source,js]
+----
+GET seats/_search
+{
+  "query" : {
+    "terms_set": {
+      "actors" : {
+        "terms" : ["smith", "earns", "black"],
+        "minimum_should_match_script": {
+          "source": "Math.min(params['num_terms'], params['min_actors_to_see'])",
+          "params" : {
+            "min_actors_to_see" : 2
+          }
+        }
+      }
+    }
+  }
+}
+----
+// CONSOLE
+// TEST[skip: requires setup from other pages]
+
diff --git a/docs/plugins/ingest-geoip.asciidoc b/docs/plugins/ingest-geoip.asciidoc
index c7cb308932c73..a255d0b51217a 100644
--- a/docs/plugins/ingest-geoip.asciidoc
+++ b/docs/plugins/ingest-geoip.asciidoc
@@ -155,10 +155,12 @@ PUT _ingest/pipeline/geoip
    }
  ]
}
+
PUT my_index/_doc/my_id?pipeline=geoip
{
  "ip": "80.231.5.0"
}
+
GET my_index/_doc/my_id
--------------------------------------------------
// CONSOLE
@@ -168,18 +170,128 @@ Which returns:

[source,js]
--------------------------------------------------
{
-  "found": true,
-  "_index": "my_index",
-  "_type": "_doc",
-  "_id": "my_id",
-  "_version": 1,
-  "_source": {
-    "ip": "80.231.5.0"
+  "_index" : "my_index",
+  "_type" : "_doc",
+  "_id" : "my_id",
+  "_version" : 1,
+  "found" : true,
+  "_source" : {
+    "ip" : "80.231.5.0"
  }
}
--------------------------------------------------
// TESTRESPONSE

+[[ingest-geoip-mappings-note]]
+===== Recognizing Location as a Geopoint
+Although this plugin enriches your document with a `location` field containing
+the estimated latitude and longitude of the IP address, this field will not be
+indexed as a {ref}/geo-point.html[`geo_point`] type in Elasticsearch without explicitly defining it
+as such in the mapping.
+ +You can use the following mapping for the example index above: + +[source,js] +-------------------------------------------------- +PUT my_ip_locations +{ + "mappings": { + "_doc": { + "properties": { + "geoip": { + "properties": { + "location": { "type": "geo_point" } + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE + +//// +[source,js] +-------------------------------------------------- +PUT _ingest/pipeline/geoip +{ + "description" : "Add geoip info", + "processors" : [ + { + "geoip" : { + "field" : "ip" + } + } + ] +} + +PUT my_ip_locations/_doc/1?refresh=true&pipeline=geoip +{ + "ip": "8.8.8.8" +} + +GET /my_ip_locations/_search +{ + "query": { + "bool" : { + "must" : { + "match_all" : {} + }, + "filter" : { + "geo_distance" : { + "distance" : "1m", + "geoip.location" : { + "lon" : -97.822, + "lat" : 37.751 + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +[source,js] +-------------------------------------------------- +{ + "took" : 3, + "timed_out" : false, + "_shards" : { + "total" : 1, + "successful" : 1, + "skipped" : 0, + "failed" : 0 + }, + "hits" : { + "total" : 1, + "max_score" : 1.0, + "hits" : [ + { + "_index" : "my_ip_locations", + "_type" : "_doc", + "_id" : "1", + "_score" : 1.0, + "_source" : { + "geoip" : { + "continent_name" : "North America", + "country_iso_code" : "US", + "location" : { + "lon" : -97.822, + "lat" : 37.751 + } + }, + "ip" : "8.8.8.8" + } + } + ] + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"took" : 3/"took" : $body.took/] +//// + [[ingest-geoip-settings]] ===== Node Settings diff --git a/docs/reference/ccr/apis/get-ccr-stats.asciidoc b/docs/reference/ccr/apis/get-ccr-stats.asciidoc index 4ae864dd2c9ff..630082fdc9bab 100644 --- a/docs/reference/ccr/apis/get-ccr-stats.asciidoc +++ b/docs/reference/ccr/apis/get-ccr-stats.asciidoc @@ -141,7 +141,10 @@ The API returns the following results: } } -------------------------------------------------- +// TESTRESPONSE[s/"number_of_failed_follow_indices" : 0/"number_of_failed_follow_indices" : $body.auto_follow_stats.number_of_failed_follow_indices/] +// TESTRESPONSE[s/"number_of_failed_remote_cluster_state_requests" : 0/"number_of_failed_remote_cluster_state_requests" : $body.auto_follow_stats.number_of_failed_remote_cluster_state_requests/] // TESTRESPONSE[s/"number_of_successful_follow_indices" : 1/"number_of_successful_follow_indices" : $body.auto_follow_stats.number_of_successful_follow_indices/] +// TESTRESPONSE[s/"recent_auto_follow_errors" : [ ]/"recent_auto_follow_errors" : $body.auto_follow_stats.recent_auto_follow_errors/] // TESTRESPONSE[s/"leader_global_checkpoint" : 1024/"leader_global_checkpoint" : $body.follow_stats.indices.0.shards.0.leader_global_checkpoint/] // TESTRESPONSE[s/"leader_max_seq_no" : 1536/"leader_max_seq_no" : $body.follow_stats.indices.0.shards.0.leader_max_seq_no/] // TESTRESPONSE[s/"follower_global_checkpoint" : 768/"follower_global_checkpoint" : $body.follow_stats.indices.0.shards.0.follower_global_checkpoint/] diff --git a/docs/reference/ilm/apis/delete-lifecycle.asciidoc b/docs/reference/ilm/apis/delete-lifecycle.asciidoc index 510d02ae579d3..adb1112961e47 100644 --- a/docs/reference/ilm/apis/delete-lifecycle.asciidoc +++ b/docs/reference/ilm/apis/delete-lifecycle.asciidoc @@ -6,6 +6,8 @@ Delete Policy ++++ +beta[] + Deletes a lifecycle policy. 
==== Request diff --git a/docs/reference/ilm/apis/explain.asciidoc b/docs/reference/ilm/apis/explain.asciidoc index 31d89a5bf0d7d..fb8d669f70e52 100644 --- a/docs/reference/ilm/apis/explain.asciidoc +++ b/docs/reference/ilm/apis/explain.asciidoc @@ -6,6 +6,8 @@ Explain Lifecycle ++++ +beta[] + Shows an index's current lifecycle status. ==== Request @@ -107,10 +109,7 @@ that the index is managed and in the `new` phase: } -------------------------------------------------- // CONSOLE -// TESTRESPONSE[s/"lifecycle_date_millis": 1538475653281/"lifecycle_date_millis": $body.indices.my_index.lifecycle_date_millis/] -// TESTRESPONSE[s/"phase_time_millis": 1538475653317/"phase_time_millis": $body.indices.my_index.phase_time_millis/] -// TESTRESPONSE[s/"action_time_millis": 1538475653317/"action_time_millis": $body.indices.my_index.action_time_millis/] -// TESTRESPONSE[s/"step_time_millis": 1538475653317/"step_time_millis": $body.indices.my_index.step_time_millis/] +// TESTRESPONSE[skip:no way to know if we will get this response immediately] <1> Shows if the index is being managed by ILM. If the index is not managed by ILM the other fields will not be shown <2> The name of the policy which ILM is using for this index @@ -140,7 +139,7 @@ phase completes. "action": "rollover", "action_time_millis": 1538475653317, "action_time": "2018-10-15T13:45:22.577Z", - "step": "attempt_rollover", + "step": "attempt-rollover", "step_time_millis": 1538475653317, "step_time": "2018-10-15T13:45:22.577Z", "phase_execution": { @@ -251,7 +250,7 @@ the step that failed and the step info provides information about the error. "step": "ERROR", "step_time_millis": 1538475653317, "step_time": "2018-10-15T13:45:22.577Z", - "failed_step": "attempt_rollover", <1> + "failed_step": "attempt-rollover", <1> "step_info": { <2> "type": "resource_already_exists_exception", "reason": "index [test-000057/H7lF9n36Rzqa-KfKcnGQMg] already exists", diff --git a/docs/reference/ilm/apis/get-lifecycle.asciidoc b/docs/reference/ilm/apis/get-lifecycle.asciidoc index 4b9b2a3bd3b60..c2c76650eeae3 100644 --- a/docs/reference/ilm/apis/get-lifecycle.asciidoc +++ b/docs/reference/ilm/apis/get-lifecycle.asciidoc @@ -6,6 +6,8 @@ Get Policy ++++ +beta[] + Retrieves a lifecycle policy. ==== Request diff --git a/docs/reference/ilm/apis/get-status.asciidoc b/docs/reference/ilm/apis/get-status.asciidoc index 4406b805fb5d9..a2c1a1ade99f9 100644 --- a/docs/reference/ilm/apis/get-status.asciidoc +++ b/docs/reference/ilm/apis/get-status.asciidoc @@ -6,6 +6,8 @@ Get {ILM} Status ++++ +beta[] + Retrieves the current {ilm} status. ==== Request diff --git a/docs/reference/ilm/apis/ilm-api.asciidoc b/docs/reference/ilm/apis/ilm-api.asciidoc index 351ff44f41fe2..0f5c9a18eeeeb 100644 --- a/docs/reference/ilm/apis/ilm-api.asciidoc +++ b/docs/reference/ilm/apis/ilm-api.asciidoc @@ -1,6 +1,8 @@ [[index-lifecycle-management-api]] == Index Lifecycle Management API +beta[] + You can use the following APIs to manage policies on indices. [float] diff --git a/docs/reference/ilm/apis/move-to-step.asciidoc b/docs/reference/ilm/apis/move-to-step.asciidoc index 854b1ab317d1b..e265761fc328a 100644 --- a/docs/reference/ilm/apis/move-to-step.asciidoc +++ b/docs/reference/ilm/apis/move-to-step.asciidoc @@ -6,6 +6,8 @@ Move to Step ++++ +beta[] + Triggers execution of a specific step in the lifecycle policy. 
==== Request

diff --git a/docs/reference/ilm/apis/put-lifecycle.asciidoc b/docs/reference/ilm/apis/put-lifecycle.asciidoc
index 565598e75cdd8..c436c8d4b6201 100644
--- a/docs/reference/ilm/apis/put-lifecycle.asciidoc
+++ b/docs/reference/ilm/apis/put-lifecycle.asciidoc
@@ -6,6 +6,8 @@
Create Policy
++++

+beta[]
+
Creates or updates lifecycle policy.

==== Request
diff --git a/docs/reference/ilm/apis/remove-policy.asciidoc b/docs/reference/ilm/apis/remove-policy.asciidoc
index 2811bc476e623..d05c735c1843b 100644
--- a/docs/reference/ilm/apis/remove-policy.asciidoc
+++ b/docs/reference/ilm/apis/remove-policy.asciidoc
@@ -6,6 +6,8 @@
Delete Policy
++++

+beta[]
+
Removes the assigned lifecycle policy from an index.

==== Request
diff --git a/docs/reference/ilm/apis/retry-policy.asciidoc b/docs/reference/ilm/apis/retry-policy.asciidoc
index 51882c543e283..7052546ce6a7b 100644
--- a/docs/reference/ilm/apis/retry-policy.asciidoc
+++ b/docs/reference/ilm/apis/retry-policy.asciidoc
@@ -6,6 +6,8 @@
Retry Policy
++++

+beta[]
+
Retry executing the policy for an index that is in the ERROR step.

==== Request
diff --git a/docs/reference/ilm/apis/start.asciidoc b/docs/reference/ilm/apis/start.asciidoc
index 6f2dffe9c3398..f5c29bf84eaed 100644
--- a/docs/reference/ilm/apis/start.asciidoc
+++ b/docs/reference/ilm/apis/start.asciidoc
@@ -6,6 +6,8 @@
Start {ILM}
++++

+beta[]
+
Start the {ILM} plugin.

==== Request
diff --git a/docs/reference/ilm/apis/stop.asciidoc b/docs/reference/ilm/apis/stop.asciidoc
index f1ffbc09c4461..eaefb7783cadb 100644
--- a/docs/reference/ilm/apis/stop.asciidoc
+++ b/docs/reference/ilm/apis/stop.asciidoc
@@ -6,6 +6,8 @@
Stop {ILM}
++++

+beta[]
+
Stop the {ILM} plugin.

==== Request
diff --git a/docs/reference/ilm/error-handling.asciidoc b/docs/reference/ilm/error-handling.asciidoc
index 8f0e4d6e4c0fc..07e018422c2d6 100644
--- a/docs/reference/ilm/error-handling.asciidoc
+++ b/docs/reference/ilm/error-handling.asciidoc
@@ -3,6 +3,8 @@
[[index-lifecycle-error-handling]]
== Index Lifecycle Error Handling

+beta[]
+
During Index Lifecycle Management's execution of the policy for an index, it's possible for a step to encounter an error during its execution. When this happens, ILM will move the management state into an "error" step. This halts
@@ -142,6 +144,7 @@ PUT _ilm/policy/shrink-the-index
// CONSOLE
// TEST[continued]

+[float]
=== Retrying failed index lifecycle management steps

Once the underlying issue that caused an index to move to the error step has
diff --git a/docs/reference/ilm/getting-started-ilm.asciidoc b/docs/reference/ilm/getting-started-ilm.asciidoc
index ad3596e92a1ad..401883d6a7c11 100644
--- a/docs/reference/ilm/getting-started-ilm.asciidoc
+++ b/docs/reference/ilm/getting-started-ilm.asciidoc
@@ -1,15 +1,232 @@
[role="xpack"]
+[testenv="basic"]
[[getting-started-index-lifecycle-management]]
== Getting started with {ilm}

-Create a policy that rolls over after 1 day deletes an index after 30 days
+beta[]

-Show create policy API req/res
+Let's jump into {ILM} by working through a hands-on scenario.
+This section will leverage many new concepts unique to {ILM} that
+you may not be familiar with. The following sections will explore
+these in more detail.

-Show assign policy to index API req/res
+The goal of this example is to set up a set of indices that will encapsulate
+the data from a time series data source. We can imagine there is a system
+like {filebeat-ref}[Filebeat] that continuously indexes documents into
+our writing index.
We wish to roll over the index after it reaches a size
+of 50 gigabytes, or once it is 30 days old, and then delete the index
+after 90 days.

-Show both the API and how it is done with `index.lifecyce.name` using the
-create-index API
+=== Setting up a new policy

-Show explain API to show current state, but ignore the “step” related info,
-only focus on managed/phase/action
+beta[]
+
+There are many new features introduced by {ILM}, but we will only focus on
+a few that are needed for our example. For starters, we will use the
+<> API to define our first policy. Lifecycle
+policies are defined in JSON and include specific
+<>.
+
+[source,js]
+------------------------
+PUT _ilm/policy/datastream_policy <1>
+{
+  "policy": { <2>
+    "phases": {
+      "hot": { <3>
+        "actions": {
+          "rollover": { <4>
+            "max_size": "50GB",
+            "max_age": "30d"
+          }
+        }
+      },
+      "delete": {
+        "min_age": "90d", <5>
+        "actions": {
+          "delete": {} <6>
+        }
+      }
+    }
+  }
+}
+------------------------
+// CONSOLE
+// TEST
+<1> call to the <> endpoint to create a new policy named "datastream_policy"
+<2> policy definition sub-object
+<3> the hot phase defined in the "phases" section. Optional `min_age` field not defined -- defaults to `0ms`
+<4> rollover action definition
+<5> delete phase begins after 90 days
+<6> delete action definition
+
+
+Here we created the policy called `datastream_policy` which rolls over
+the index being written to after it reaches 50 gigabytes, or when it is 30
+days old. The rollover will occur when either of these conditions is true.
+The index will be deleted 90 days after it is rolled over.
+
+=== Applying a policy to our index
+
+beta[]
+
+There are <> to associate a
+policy to an index. Since we want specific settings to be applied to
+the new index created from Rollover, we will set the policy via
+index templates.
+
+
+[source,js]
+-----------------------
+PUT _template/datastream_template
+{
+  "index_patterns": ["datastream-*"], <1>
+  "settings": {
+    "number_of_shards": 1,
+    "number_of_replicas": 1,
+    "index.lifecycle.name": "datastream_policy", <2>
+    "index.lifecycle.rollover_alias": "datastream" <3>
+  }
+}
+-----------------------
+// CONSOLE
+// TEST[continued]
+<1> match all indices starting with "datastream-". These will include all newly created indices from actions like rollover
+<2> the name of the lifecycle policy managing the index
+<3> alias to use for the rollover action, required since a rollover action is defined in the policy.
+
+The above index template introduces a few new settings specific to {ILM}.
+The first is `index.lifecycle.name`. This setting applies
+the "datastream_policy" policy to each index created from this template. This means
+that all newly created indices prefixed "datastream-" will be managed by
+our policy. The other setting used here is `index.lifecycle.rollover_alias`.
+This setting is required when using a policy containing the rollover
+action and specifies which alias to roll over on behalf of this index.
+The intention here is that the rollover alias is also defined on the index.
+
+To begin, we will want to bootstrap our first index to write to.
+
+
+[source,js]
+-----------------------
+PUT datastream-000001
+{
+  "aliases": {
+    "datastream": {
+      "is_write_index": true
+    }
+  }
+}
+-----------------------
+// CONSOLE
+// TEST[continued]
+
+When creating our index, we have to consider a few important configurations
+that tie our index and our policy together correctly.
We need to make sure
+that our index name matches our index template pattern of "datastream-*",
+which it does. We are using the <> in our policy, which
+requires that our index name ends with a number. In our case, we used
+`000001`. This is important so that Rollover can increment this number when
+naming the new index created from rolling over.
+
+Our index creation request leverages its template to apply our settings,
+but we must also configure our rollover alias: "datastream". To do this,
+we take advantage of <>. This is a way
+to define an alias to be used for both reading and writing, with only one
+index being the index that is being written to at a time. Rollover swaps
+the write index to be the new index created from rollover, and sets the
+alias to be read-only for the source index.
+
+=== Checking progress
+
+beta[]
+
+Now that we have an index managed by our policy, how do we tell what is going
+on? Which phase are we in? Is something broken? This section will go over a
+few APIs and their responses to help us inspect our indices with respect
+to {ILM}.
+
+With the help of the <>, we can find out
+things like which phase we're in and when we entered that phase. The API
+will also provide further info if errors occurred, or if we are blocked on
+certain checks within actions.
+
+[source,js]
+--------------------------------------------------
+GET datastream-*/_ilm/explain
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+The above request will retrieve {ILM} execution information for all our
+managed indices.
+
+
+[source,js]
+--------------------------------------------------
+{
+  "indices": {
+    "datastream-000001": {
+      "index": "datastream-000001",
+      "managed": true, <1>
+      "policy": "datastream_policy", <2>
+      "lifecycle_date_millis": 1538475653281,
+      "phase": "hot", <3>
+      "phase_time_millis": 1538475653317,
+      "action": "rollover", <4>
+      "action_time_millis": 1538475653317,
+      "step": "attempt-rollover", <5>
+      "step_time_millis": 1538475653317,
+      "phase_execution": {
+        "policy": "datastream_policy",
+        "phase_definition": { <6>
+          "min_age": "0ms",
+          "actions": {
+            "rollover": {
+              "max_size": "50gb",
+              "max_age": "30d"
+            }
+          }
+        },
+        "version": 1, <7>
+        "modified_date_in_millis": 1539609701576
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TESTRESPONSE[skip:no way to know if we will get this response immediately]
+<1> this index is managed by ILM
+<2> the policy in question, in this case, "datastream_policy"
+<3> what phase the index is currently in
+<4> what action the index is currently on
+<5> what step the index is currently on
+<6> the definition of the phase (in this case, the "hot" phase) that the index is currently on
+<7> the version of the policy being used to execute the current phase
+
+You can read about the full details of this response in the
+<>. For now, let's focus on how
+the response details which phase, action, and step we're in. We are in the
+"hot" phase, and "rollover" action. Rollover will continue to be called
+by {ILM} until its conditions are met and it rolls over the index.
+Afterwards, the original index will stay in the hot phase until 90 more
+days pass and it is deleted in the delete phase.
+As time goes on, new indices will be created and deleted:
+`datastream-000002` will be created when the index meets the rollover
+conditions, and `datastream-000003` after that.
We will be able
+to search across all of our managed indices using the "datastream" alias,
+and we will be able to write to our to-be-rolled-over write indices using
+that same alias.
+
+
+
+That's it! We have our first use case managed by {ILM}.
+
+To learn more about all our APIs,
+check out <>.
diff --git a/docs/reference/ilm/index.asciidoc b/docs/reference/ilm/index.asciidoc
index a542aa610943e..b568203a6a220 100644
--- a/docs/reference/ilm/index.asciidoc
+++ b/docs/reference/ilm/index.asciidoc
@@ -8,6 +8,8 @@
:ILM: ILM

[partintro]
--
+beta[]
+
The <> enable you to automate how you want to manage your indices over time. Rather than simply performing management actions on your indices on a set schedule, you can base actions on other factors
@@ -52,13 +54,15 @@ hardware.

include::getting-started-ilm.asciidoc[]

-include::using-policies-rollover.asciidoc[]
+include::policy-definitions.asciidoc[]

include::set-up-lifecycle-policy.asciidoc[]

+include::using-policies-rollover.asciidoc[]
+
include::update-lifecycle-policy.asciidoc[]

-include::get-index-lifecycle-information.asciidoc[]
+// include::get-index-lifecycle-information.asciidoc[]

include::error-handling.asciidoc[]
diff --git a/docs/reference/ilm/policy-definitions.asciidoc b/docs/reference/ilm/policy-definitions.asciidoc
new file mode 100644
index 0000000000000..4c083511be3ec
--- /dev/null
+++ b/docs/reference/ilm/policy-definitions.asciidoc
@@ -0,0 +1,558 @@
+beta[]
+[role="xpack"]
+[testenv="basic"]
+[[ilm-policy-definition]]
+== Policy Phases and Actions
+
+beta[]
+
+There are four stages in the index lifecycle, listed here in the order
+in which they are executed.
+
+[options="header"]
+|======
+| Name | Description
+| `hot` | The index is actively being written to
+| `warm` | The index is generally not being written to, but is still queried
+| `cold` | The index is no longer being updated and is seldom queried. The
+information still needs to be searchable, but it's okay if those queries are
+slower.
+| `delete` | The index is no longer needed and can safely be deleted
+|======
+
+Each of these stages is called a "phase". A policy does not need to configure
+each phase for an index. For example, one policy may define only the hot
+phase and the delete phase, while another may define all four phases.
+
+=== Timing
+
+beta[]
+
+Indices enter phases based on a phase's `min_age` parameter.
+The index will not enter the phase until the index's age is older than
+the `min_age`. The parameter is configured using a time
+duration format (see <>).
+
+`min_age` defaults to zero seconds (`0s`) for each phase if not specified.
+
+[source,js]
+--------------------------------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "warm": {
+        "min_age": "1d",
+        "actions": {
+          "allocate": {
+            "number_of_replicas": 1
+          }
+        }
+      },
+      "delete": {
+        "min_age": "30d",
+        "actions": {
+          "delete": {}
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+
+The above example configures a policy that moves the index into the warm
+phase after one day. Until then, the index is in a waiting state. After
+moving into the warm phase, it will wait until 30 days have elapsed before
+moving to the delete phase and deleting the index.
+
+`min_age` is usually the time elapsed from the time the index is created. If the
+index is rolled over, then `min_age` is the time elapsed from the time the index
+is rolled over.
The intention here is to execute the following phases and actions
+relative to when data was last written to a rolled-over index.
+
+=== Actions
+
+beta[]
+
+The below list shows the actions which are available in each phase.
+
+* Hot
+  - <>
+* Warm
+  - <>
+  - <>
+  - <>
+  - <>
+* Cold
+  - <>
+* Delete
+  - <>
+
+[[ilm-allocate-action]]
+==== Allocate
+
+Phases allowed: warm, cold.
+
+The Allocate action allows you to specify which nodes are allowed to host the
+shards of the index and set the number of replicas.
+Behind the scenes, it is modifying the index settings
+for shard filtering and/or replica counts. When updating the number of replicas,
+configuring allocation rules is optional. When configuring allocation rules,
+setting number of replicas is optional. Although this action can be treated as
+two separate index settings updates, both can be configured at once.
+
+Read more about index replicas <>.
+Read more about shard allocation filtering in
+the <>.
+
+[[ilm-allocate-options]]
+.Allocate Options
+[options="header"]
+|======
+| Name | Required | Default | Description
+| `number_of_replicas` | no | - | The number of replicas to assign to the index
+| `include` | no | - | assigns an index to nodes having at least _one_ of the attributes
+| `exclude` | no | - | assigns an index to nodes having _none_ of the attributes
+| `require` | no | - | assigns an index to nodes having _all_ of the attributes
+|======
+
+If `number_of_replicas` is not configured, then at least one of `include`,
+`exclude`, and `require` is required. An empty Allocate Action with no configuration
+is invalid.
+
+===== Example: Change number of replicas
+
+In this example, the index's number of replicas is changed to `2`, while allocation
+rules are unchanged.
+
+[source,js]
+--------------------------------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "warm": {
+        "actions": {
+          "allocate" : {
+            "number_of_replicas" : 2
+          }
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+
+===== Example: Assign index to node with specific "box_type" attribute
+
+This example assigns the index to nodes with a `box_type` attribute of "hot" or "warm".
+
+[source,js]
+--------------------------------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "warm": {
+        "actions": {
+          "allocate" : {
+            "include" : {
+              "box_type": "hot,warm"
+            }
+          }
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+
+===== Example: Assign index to a specific node and update replica settings
+
+This example updates the index to have one replica per shard and be allocated
+to nodes with a `box_type` attribute of "cold".
+
+[source,js]
+--------------------------------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "warm": {
+        "actions": {
+          "allocate" : {
+            "number_of_replicas": 1,
+            "require" : {
+              "box_type": "cold"
+            }
+          }
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+
+[[ilm-delete-action]]
+==== Delete
+
+Phases allowed: delete.
+
+The Delete Action does just that: it deletes the index.
+
+This action does not have any options associated with it.
+
+[source,js]
+--------------------------------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "delete": {
+        "actions": {
+          "delete" : { }
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+
+[[ilm-forcemerge-action]]
+==== Force Merge
+
+Phases allowed: warm.
+
+NOTE: The index will be made read-only when this action is run
+(see: <>)
+
+The Force Merge Action <> the index into at
+most a specific number of <>.
+
+[[ilm-forcemerge-options]]
+.Force Merge Options
+[options="header"]
+|======
+| Name | Required | Default | Description
+| `max_num_segments` | yes | - | The number of segments to merge to. To fully merge the index, set it to `1`
+|======
+
+[source,js]
+--------------------------------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "warm": {
+        "actions": {
+          "forcemerge" : {
+            "max_num_segments": 1
+          }
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+
+[[ilm-readonly-action]]
+==== Read-Only
+
+Phases allowed: warm.
+
+This action will set the index to be read-only
+(see: <>)
+
+This action does not have any options associated with it.
+
+[source,js]
+--------------------------------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "warm": {
+        "actions": {
+          "readonly" : { }
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+
+[[ilm-rollover-action]]
+==== Rollover
+
+Phases allowed: hot.
+
+[WARNING]
+The index name must match the pattern '^.*-\\d+$', for example `logs-000001`.
+[WARNING]
+The managed index must set `index.lifecycle.rollover_alias` as the
+alias to roll over. The index must also be the write index for the alias.
+
+For example, if an index to be managed has the alias `my_data`, the managed
+index "my_index" must be the write index for that alias. For more information, read
+<>.
+
+[source,js]
+--------------------------------------------------
+PUT my_index
+{
+  "settings": {
+    "index.lifecycle.name": "my_policy",
+    "index.lifecycle.rollover_alias": "my_data"
+  },
+  "aliases": {
+    "my_data": {
+      "is_write_index": true
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+
+The Rollover Action rolls an alias over to a new index when the
+existing index meets one of the rollover conditions.
+
+
+[[ilm-rollover-options]]
+.Rollover Options
+[options="header"]
+|======
+| Name | Required | Default | Description
+| `max_size` | no | - | max index storage size. See <> for formatting
+| `max_docs` | no | - | max number of documents an index is to contain before rolling over.
+| `max_age` | no | - | max time elapsed from index creation. See <> for formatting
+|======
+
+At least one of `max_size`, `max_docs`, or `max_age` must be specified; any
+combination of the three is allowed.
+
+===== Example: Rollover when index is too large
+
+This example rolls the index over when it is at least 100 gigabytes.
+
+[source,js]
+--------------------------------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "hot": {
+        "actions": {
+          "rollover" : {
+            "max_size": "100GB"
+          }
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+
+===== Example: Rollover when index has too many documents
+
+This example rolls the index over when it contains at least
+1,000,000 documents.
+
+[source,js]
+--------------------------------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "hot": {
+        "actions": {
+          "rollover" : {
+            "max_docs": 1000000
+          }
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+
+===== Example: Rollover when index is too old
+
+This example rolls the index over when it is at least
+7 days old.
+
+[source,js]
+--------------------------------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "hot": {
+        "actions": {
+          "rollover" : {
+            "max_age": "7d"
+          }
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+
+===== Example: Rollover when index is too old or too large
+
+This example rolls the index over when it is at least
+7 days old or at least 100 gigabytes in size. In this case, the index will be
+rolled over when any of the conditions is met.
+
+[source,js]
+--------------------------------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "hot": {
+        "actions": {
+          "rollover" : {
+            "max_age": "7d",
+            "max_size": "100GB"
+          }
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+
+
+[[ilm-shrink-action]]
+==== Shrink
+
+NOTE: The index will be made read-only when this action is run
+(see: <>)
+
+This action shrinks an existing index into a new index with fewer primary
+shards. It calls the <> to shrink the index.
+Since allocating all the primary shards of the index to one node is a
+prerequisite, this action will first allocate the primary shards to a valid
+node. After shrinking, it will swap aliases pointing to the original index
+over to the new shrunken index. The new index will also have a new name:
+"shrink-<original-index-name>". So if the original index was called "logs",
+then the new index will be named "shrink-logs".
+
+[[ilm-shrink-options]]
+.Shrink Options
+[options="header"]
+|======
+| Name | Required | Default | Description
+| `number_of_shards` | yes | - | The number of shards to shrink to. Must be a factor of the number of shards in the source index.
+|======
+
+[source,js]
+--------------------------------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "warm": {
+        "actions": {
+          "shrink" : {
+            "number_of_shards": 1
+          }
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+
+
+=== Full Policy
+
+beta[]
+
+With all of these actions, we can support complex management strategies for our
+indices. This policy will define an index that will start in the hot phase,
+rolling over once it reaches 20G in size or is 7 days old. After 30 days it enters the warm phase,
+increases the replicas to 2, force merges, and shrinks the index. After 60 days
+it enters the cold phase and allocates to "cold" nodes, and after 90 days the
+index is deleted.
+
+[source,js]
+--------------------------------------------------
+PUT _ilm/policy/full_policy
+{
+  "policy": {
+    "phases": {
+      "hot": {
+        "actions": {
+          "rollover": {
+            "max_age": "7d",
+            "max_size": "20G"
+          }
+        }
+      },
+      "warm": {
+        "min_age": "30d",
+        "actions": {
+          "forcemerge": {
+            "max_num_segments": 1
+          },
+          "shrink": {
+            "number_of_shards": 1
+          },
+          "allocate": {
+            "number_of_replicas": 2
+          }
+        }
+      },
+      "cold": {
+        "min_age": "60d",
+        "actions": {
+          "allocate": {
+            "require": {
+              "type": "cold"
+            }
+          }
+        }
+      },
+      "delete": {
+        "min_age": "90d",
+        "actions": {
+          "delete": {}
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
diff --git a/docs/reference/ilm/set-up-lifecycle-policy.asciidoc b/docs/reference/ilm/set-up-lifecycle-policy.asciidoc
index 7f5bb84c598a4..11643bdfc0674 100644
--- a/docs/reference/ilm/set-up-lifecycle-policy.asciidoc
+++ b/docs/reference/ilm/set-up-lifecycle-policy.asciidoc
@@ -3,6 +3,8 @@
[[set-up-lifecycle-policy]]
== Set up {ilm} policy

+beta[]
+
In order for an index to use an {ilm} policy to manage its lifecycle we must first define a lifecycle policy for it to use. The following request creates a policy called `my_policy` in Elasticsearch which we can later use to manage
@@ -30,12 +32,12 @@ PUT _ilm/policy/my_policy
}
}
}
-------------------------
+------------------------
// CONSOLE
<1> Rollover the index when it reaches 25GB in size
<2> Delete the index when it's 30 days old

-{ilm} will manage an index using the policy defined in the
+{ilm} will manage an index using the policy defined in the
`index.lifecycle.name` index setting. If this setting does not exist in the settings for a particular index {ilm} will not manage that index.
@@ -45,6 +47,8 @@ To set the policy for an index there are two options:

=== Applying a policy to an index template

+beta[]
+
The `index.lifecycle.name` setting can be set in an index template so that it is automatically applied to indexes matching the template's index pattern:
@@ -89,6 +93,8 @@ create a new index and roll the alias over to use the new index automatically.

=== Apply a policy to a create index request

+beta[]
+
The `index.lifecycle.name` setting can be set on an individual create index request so {ilm} immediately starts managing the index:
@@ -109,4 +115,3 @@
IMPORTANT: It's recommended not to use the create index API with a policy that defines a rollover action. If you do so, the new index created by the rollover will not carry forward the policy. Always use index templates to define policies with rollover actions.
-
diff --git a/docs/reference/ilm/start-stop-ilm.asciidoc b/docs/reference/ilm/start-stop-ilm.asciidoc
index 938b97d44721f..516f85a0e39d2 100644
--- a/docs/reference/ilm/start-stop-ilm.asciidoc
+++ b/docs/reference/ilm/start-stop-ilm.asciidoc
@@ -3,6 +3,8 @@
[[start-stop-ilm]]
== Start And Stop {ilm}

+beta[]
+
All indices that are managed by ILM will continue to execute their policies. There may be times when this is not desired on certain indices, or maybe even all the indices in a cluster. For example,
@@ -77,7 +79,8 @@ The operating modes of ILM:

|STOPPED |This represents a state where no policies are executed
|===

-=== Stopping ILM
+[float]
+=== Stopping ILM

The ILM service can be paused such that no further steps will be executed using the <>.
@@ -134,6 +137,7 @@ GET _ilm/status
// CONSOLE
// TESTRESPONSE

+[float]
=== Starting ILM

To start ILM and continue executing policies, use the <>.
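+A minimal console sketch of that restart flow; once restarted, the status request should report the `RUNNING` operating mode:
+
+[source,js]
+--------------------------------------------------
+POST _ilm/start
+
+GET _ilm/status
+--------------------------------------------------
+// CONSOLE
+// TEST[skip: sketch of the start and status requests described above]
+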
diff --git a/docs/reference/ilm/update-lifecycle-policy.asciidoc b/docs/reference/ilm/update-lifecycle-policy.asciidoc index 97a3131241e22..cbe82412c9415 100644 --- a/docs/reference/ilm/update-lifecycle-policy.asciidoc +++ b/docs/reference/ilm/update-lifecycle-policy.asciidoc @@ -6,6 +6,8 @@ Update Policy ++++ +beta[] + You can update an existing lifecycle policy to fix mistakes or change strategies for newly created indices. It is possible to update policy definitions and an index's `index.lifecycle.name` settings independently. To prevent the situation @@ -17,6 +19,8 @@ their effects on policy execution on indices. === Updates to policies not managing indices +beta[] + Indices not referencing an existing policy that is updated will not be affected. If an index is assigned to the policy, it will be assigned the latest version of that policy @@ -130,6 +134,8 @@ the policy. === Updates to executing policies +beta[] + Indices preserve the phase definition from the latest policy version that existed at the time that it entered that phase. Changes to the currently-executing phase within policy updates will not be reflected during execution. This means that updates to the `hot` phase, for example, will not affect @@ -177,7 +183,7 @@ PUT my_index //// The <> is useful to introspect managed indices to see which phase definition they are currently executing. -Using this API, we can find out that `my_index` is currently attempting to be rolled over. +Using this API, we can find out that `my_index` is currently checking if it is ready to be rolled over. [source,js] -------------------------------------------------- @@ -199,7 +205,7 @@ GET my_index/_ilm/explain "phase_time_millis": 1538475653317, "action": "rollover", "action_time_millis": 1538475653317, - "step": "attempt_rollover", + "step": "check-rollover-ready", "step_time_millis": 1538475653317, "phase_execution": { "policy": "my_executing_policy", @@ -219,11 +225,7 @@ GET my_index/_ilm/explain } -------------------------------------------------- // CONSOLE -// TESTRESPONSE[s/"lifecycle_date_millis": 1538475653281/"lifecycle_date_millis": $body.indices.my_index.lifecycle_date_millis/] -// TESTRESPONSE[s/"phase_time_millis": 1538475653317/"phase_time_millis": $body.indices.my_index.phase_time_millis/] -// TESTRESPONSE[s/"action_time_millis": 1538475653317/"action_time_millis": $body.indices.my_index.action_time_millis/] -// TESTRESPONSE[s/"step_time_millis": 1538475653317/"step_time_millis": $body.indices.my_index.step_time_millis/] -// TESTRESPONSE[s/"modified_date_in_millis": 1538475653317/"modified_date_in_millis": $body.indices.my_index.phase_execution.modified_date_in_millis/] +// TESTRESPONSE[skip:no way to know if we will get this response immediately] Updating `my_executing_policy` to have no rollover action and, instead, go directly into a newly introduced `warm` phase. @@ -279,7 +281,7 @@ GET my_index/_ilm/explain "phase_time_millis": 1538475653317, "action": "rollover", "action_time_millis": 1538475653317, - "step": "attempt_rollover", + "step": "check-rollover-ready", "step_time_millis": 1538475653317, "phase_execution": { "policy": "my_executing_policy", @@ -362,6 +364,8 @@ GET my_index/_ilm/explain === Switching policies for an index +beta[] + Setting `index.lifecycle.name` to a different policy behaves much like a policy update, but instead of just switching to a different version, it switches to a different policy. 
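+A minimal sketch of such a switch via the update index settings API; `my_index` and `my_other_policy` are made-up names:
+
+[source,js]
+--------------------------------------------------
+PUT my_index/_settings
+{
+  "index.lifecycle.name": "my_other_policy"
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[skip: sketch only, requires my_index and my_other_policy to exist]
+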
diff --git a/docs/reference/ilm/using-policies-rollover.asciidoc b/docs/reference/ilm/using-policies-rollover.asciidoc index f7982af4fec81..ec3eb01fb64d1 100644 --- a/docs/reference/ilm/using-policies-rollover.asciidoc +++ b/docs/reference/ilm/using-policies-rollover.asciidoc @@ -3,6 +3,8 @@ [[using-policies-rollover]] == Using policies to manage index rollover +beta[] + The rollover action enables you to automatically roll over to a new index based on the index size, document count, or age. When a rollover is triggered, a new index is created, the write alias is updated to point to the new index, and all diff --git a/docs/reference/ml/apis/datafeedresource.asciidoc b/docs/reference/ml/apis/datafeedresource.asciidoc index 6fe0b35d95185..73361b12454b9 100644 --- a/docs/reference/ml/apis/datafeedresource.asciidoc +++ b/docs/reference/ml/apis/datafeedresource.asciidoc @@ -64,6 +64,11 @@ A {dfeed} resource has the following properties: example: `[]`. This property is provided for backwards compatibility with releases earlier than 6.0.0. For more information, see <>. +`delayed_data_check_config`:: + (object) Specifies whether the {dfeed} checks for missing data and, if so, + how large a window it checks. See <>. + For example: `{"enabled": true, "check_window": "1h"}` + [[ml-datafeed-chunking-config]] ==== Chunking Configuration Objects @@ -86,6 +91,27 @@ A chunking configuration object has the following properties: This setting is only applicable when the mode is set to `manual`. For example: `3h`. +[[ml-datafeed-delayed-data-check-config]] +==== Delayed Data Check Configuration Objects +The {dfeed} can optionally search over indices that have already been read in +an effort to determine whether any data has since been added to the index. If missing data +is found, it is a good indication that the `query_delay` option is set too low and +the data is being indexed after the {dfeed} has passed that moment in time. + +This check runs only on real-time {dfeeds}. + +The configuration object has the following properties: + +`enabled`:: + (boolean) Specifies whether the {dfeed} periodically checks for data that is indexed + after it has been read. Defaults to `true`. + +`check_window`:: + (time units) The window of time before the latest finalized bucket that should be searched + for late data. Defaults to `null`, which causes an appropriate `check_window` to be calculated + when the real-time {dfeed} runs. + [float] [[ml-datafeed-counts]] ==== {dfeed-cap} Counts diff --git a/docs/reference/ml/apis/put-datafeed.asciidoc b/docs/reference/ml/apis/put-datafeed.asciidoc index b5c99fc8e36af..223b88760be52 100644 --- a/docs/reference/ml/apis/put-datafeed.asciidoc +++ b/docs/reference/ml/apis/put-datafeed.asciidoc @@ -78,6 +78,10 @@ You must create a job before you create a {dfeed}. You can associate only one For example: `[]`. This property is provided for backwards compatibility with releases earlier than 6.0.0. For more information, see <>. +`delayed_data_check_config`:: + (object) Specifies whether the {dfeed} checks for missing data and, if so, + how large a window it checks. See <>. + For more information about these properties, see <>.
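To show where the new `delayed_data_check_config` object sits, here is a minimal sketch of a {dfeed} definition; the `_xpack/ml` path is assumed to match the 6.x REST layout, and the job and index names are hypothetical:

[source,js]
--------------------------------------------------
PUT _xpack/ml/datafeeds/datafeed-total-requests
{
  "job_id": "total-requests",
  "indices": ["server-metrics"],
  "delayed_data_check_config": {
    "enabled": true,
    "check_window": "2h"
  }
}
--------------------------------------------------
// CONSOLE

Setting `check_window` explicitly, as here, overrides the automatic calculation that the `null` default would otherwise trigger when the real-time {dfeed} runs.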
Unlike the "NOT" clause in bool query, this still selects documents that contain undesirable terms, but reduces their overall score. +It accepts a `positive` query and a `negative` query. +Only documents that match the `positive` query will be included +in the results list, but documents that also match the `negative` query +will be downgraded by multiplying the original `_score` of the document +with the `negative_boost`. + [source,js] -------------------------------------------------- GET /_search diff --git a/docs/reference/search/count.asciidoc b/docs/reference/search/count.asciidoc index 3b32222786f85..19ba77cd12979 100644 --- a/docs/reference/search/count.asciidoc +++ b/docs/reference/search/count.asciidoc @@ -14,9 +14,9 @@ PUT /twitter/_doc/1?refresh "user": "kimchy" } -GET /twitter/_doc/_count?q=user:kimchy +GET /twitter/_count?q=user:kimchy -GET /twitter/_doc/_count +GET /twitter/_count { "query" : { "term" : { "user" : "kimchy" } diff --git a/docs/reference/search/validate.asciidoc b/docs/reference/search/validate.asciidoc index c05a0cb55646e..c17f1393e1520 100644 --- a/docs/reference/search/validate.asciidoc +++ b/docs/reference/search/validate.asciidoc @@ -60,7 +60,7 @@ The query may also be sent in the request body: [source,js] -------------------------------------------------- -GET twitter/_doc/_validate/query +GET twitter/_validate/query { "query" : { "bool" : { @@ -87,7 +87,7 @@ due to dynamic mapping, and 'foo' does not correctly parse into a date: [source,js] -------------------------------------------------- -GET twitter/_doc/_validate/query +GET twitter/_validate/query { "query": { "query_string": { @@ -110,7 +110,7 @@ about why a query failed: [source,js] -------------------------------------------------- -GET twitter/_doc/_validate/query?explain=true +GET twitter/_validate/query?explain=true { "query": { "query_string": { @@ -150,7 +150,7 @@ For More Like This: [source,js] -------------------------------------------------- -GET twitter/_doc/_validate/query?rewrite=true +GET twitter/_validate/query?rewrite=true { "query": { "more_like_this": { @@ -197,7 +197,7 @@ For Fuzzy Queries: [source,js] -------------------------------------------------- -GET twitter/_doc/_validate/query?rewrite=true&all_shards=true +GET twitter/_validate/query?rewrite=true&all_shards=true { "query": { "match": { diff --git a/docs/reference/sql/endpoints/jdbc.asciidoc b/docs/reference/sql/endpoints/jdbc.asciidoc index 98589043f614d..35e3ecf95d74c 100644 --- a/docs/reference/sql/endpoints/jdbc.asciidoc +++ b/docs/reference/sql/endpoints/jdbc.asciidoc @@ -129,7 +129,7 @@ To put all of it together, the following URL: ["source","text"] ---- -jdbc:es://http://server:3456/timezone=UTC&page.size=250 +jdbc:es://http://server:3456/?timezone=UTC&page.size=250 ---- Opens up a {es-sql} connection to `server` on port `3456`, setting the JDBC connection timezone to `UTC` and its pagesize to `250` entries. diff --git a/docs/reference/sql/index.asciidoc b/docs/reference/sql/index.asciidoc index 31a4c6df9e0ea..65e5313de4ac6 100644 --- a/docs/reference/sql/index.asciidoc +++ b/docs/reference/sql/index.asciidoc @@ -36,10 +36,12 @@ indices and return results in tabular format. SQL and print tabular results. <>:: A JDBC driver for {es}. +{sql-odbc}[ODBC]:: + An ODBC driver for {es}. <>:: Documentation for configuring various SQL/BI tools with {es-sql}. 
<>:: - Overview of the {es-sql} language, such as supported data types, commands and + Overview of the {es-sql} language, such as supported data types, commands and syntax. <>:: List of functions and operators supported. diff --git a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java index e53d0ec3e8c9c..1ad0fff49b78a 100644 --- a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java +++ b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java @@ -109,7 +109,7 @@ protected Class loadClass(String name, boolean resolve) throws ClassNotFoundE BucketAggregationSelectorScript.Factory wrappedFactory = parameters -> new BucketAggregationSelectorScript(parameters) { @Override public boolean execute() { - return factory.newInstance(getParams()).execute() == 1.0; + return factory.newInstance(getParams()).execute().doubleValue() == 1.0; } }; return context.factoryClazz.cast(wrappedFactory); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ContextExampleTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ContextExampleTests.java index f14b270151c67..adce6b6c864a8 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ContextExampleTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ContextExampleTests.java @@ -1,3 +1,5 @@ + + /* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with @@ -359,5 +361,70 @@ public void testScriptFieldsScript() { singletonMap("_source", source), true) ); } + + // Use script query request to filter documents + /* + GET localhost:9200/evening/_search + { + "query": { + "bool" : { + "filter" : { + "script" : { + "script" : { + "source" : "doc['sold'].value == false && doc['cost'].value < params.cost", + "params" : { + "cost" : 18 + } + } + } + } + } + } + } + */ + + public void testFilterScript() { + Map source = new HashMap<>(); + source.put("sold", false); + source.put("cost", 15); + + Map params = new HashMap<>(); + params.put("_source", source); + params.put("cost", 18); + + boolean result = (boolean) exec( + " params['_source']['sold'] == false && params['_source']['cost'] < params.cost;", + params, true); + assertTrue(result); + } + + + // Use a terms_set query's minimum_should_match_script to control how many terms must match + /* + curl -X GET localhost:9200/seats/_search + { + "query" : { + "terms_set": { + "actors" : { + "terms" : ["smith", "earns", "black"], + "minimum_should_match_script": { + "source": "Math.min(params['num_terms'], params['min_actors_to_see'])", + "params" : { + "min_actors_to_see" : 2 + } + } + } + } + } + } + */ + public void testMinShouldMatchScript() { + Map params = new HashMap<>(); + params.put("num_terms", 3); + params.put("min_actors_to_see", 2); + + double result = (double) exec("Math.min(params['num_terms'], params['min_actors_to_see']);", params, true); + assertEquals(2, result, 0); + } } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java index 7efd1ee5d6e72..b4199293f79b3 100644 ---
b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java @@ -35,8 +35,6 @@ import org.elasticsearch.action.bulk.BulkItemResponse.Failure; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; -import org.elasticsearch.index.reindex.ScrollableHitSource.Hit; -import org.elasticsearch.index.reindex.ScrollableHitSource.SearchFailure; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexRequest; @@ -65,6 +63,8 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.Index; import org.elasticsearch.index.engine.VersionConflictEngineException; +import org.elasticsearch.index.reindex.ScrollableHitSource.Hit; +import org.elasticsearch.index.reindex.ScrollableHitSource.SearchFailure; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; @@ -323,8 +323,13 @@ public ScheduledFuture schedule(TimeValue delay, String name, Runnable comman // While we're here we can check that the sleep made it through assertThat(delay.nanos(), greaterThan(0L)); assertThat(delay.seconds(), lessThanOrEqualTo(10L)); - ((AbstractRunnable) command).onRejection(new EsRejectedExecutionException("test")); - return null; + final EsRejectedExecutionException exception = new EsRejectedExecutionException("test"); + if (command instanceof AbstractRunnable) { + ((AbstractRunnable) command).onRejection(exception); + return null; + } else { + throw exception; + } } }); ScrollableHitSource.Response response = new ScrollableHitSource.Response(false, emptyList(), 0, emptyList(), null); diff --git a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/discovery/azure/classic/AzureUnicastHostsProvider.java b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/discovery/azure/classic/AzureUnicastHostsProvider.java index 987942ef4f031..2d46ec0986296 100644 --- a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/discovery/azure/classic/AzureUnicastHostsProvider.java +++ b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/discovery/azure/classic/AzureUnicastHostsProvider.java @@ -24,12 +24,14 @@ import com.microsoft.windowsazure.management.compute.models.HostedServiceGetDetailedResponse; import com.microsoft.windowsazure.management.compute.models.InstanceEndpoint; import com.microsoft.windowsazure.management.compute.models.RoleInstance; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cloud.azure.classic.AzureServiceDisableException; import org.elasticsearch.cloud.azure.classic.AzureServiceRemoteException; import org.elasticsearch.cloud.azure.classic.management.AzureComputeService; import org.elasticsearch.cloud.azure.classic.management.AzureComputeService.Discovery; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkService; @@ -45,7 +47,9 @@ import java.util.ArrayList; import java.util.List; -public class AzureUnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider { +public class AzureUnicastHostsProvider implements UnicastHostsProvider { + + private static final Logger logger = 
LogManager.getLogger(AzureUnicastHostsProvider.class); public enum HostType { PRIVATE_IP("private_ip"), diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java index 6d677d03c3909..5f384c049124e 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java @@ -28,17 +28,20 @@ import com.amazonaws.retry.RetryPolicy; import com.amazonaws.services.ec2.AmazonEC2; import com.amazonaws.services.ec2.AmazonEC2Client; + +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.util.LazyInitializable; import java.util.Random; import java.util.concurrent.atomic.AtomicReference; -class AwsEc2ServiceImpl extends AbstractComponent implements AwsEc2Service { +class AwsEc2ServiceImpl implements AwsEc2Service { + + private static final Logger logger = LogManager.getLogger(AwsEc2ServiceImpl.class); private final AtomicReference> lazyClientReference = new AtomicReference<>(); diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java index 2817c1c3b60bf..0af17091cf3f7 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java @@ -27,9 +27,11 @@ import com.amazonaws.services.ec2.model.Instance; import com.amazonaws.services.ec2.model.Reservation; import com.amazonaws.services.ec2.model.Tag; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; @@ -44,13 +46,15 @@ import java.util.Set; import static java.util.Collections.disjoint; -import static org.elasticsearch.discovery.ec2.AwsEc2Service.HostType.TAG_PREFIX; import static org.elasticsearch.discovery.ec2.AwsEc2Service.HostType.PRIVATE_DNS; import static org.elasticsearch.discovery.ec2.AwsEc2Service.HostType.PRIVATE_IP; import static org.elasticsearch.discovery.ec2.AwsEc2Service.HostType.PUBLIC_DNS; import static org.elasticsearch.discovery.ec2.AwsEc2Service.HostType.PUBLIC_IP; +import static org.elasticsearch.discovery.ec2.AwsEc2Service.HostType.TAG_PREFIX; -class AwsEc2UnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider { +class AwsEc2UnicastHostsProvider implements UnicastHostsProvider { + + private static final Logger logger = LogManager.getLogger(AwsEc2UnicastHostsProvider.class); private final TransportService transportService; diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2NameResolver.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2NameResolver.java index 
e9dd3a10e4cc3..ff675cf5d1af1 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2NameResolver.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2NameResolver.java @@ -20,10 +20,12 @@ package org.elasticsearch.discovery.ec2; import com.amazonaws.util.EC2MetadataUtils; -import org.elasticsearch.core.internal.io.IOUtils; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.network.NetworkService.CustomNameResolver; +import org.elasticsearch.core.internal.io.IOUtils; import java.io.BufferedReader; import java.io.IOException; @@ -51,7 +53,9 @@ * * @author Paul_Loy (keteracel) */ -class Ec2NameResolver extends AbstractComponent implements CustomNameResolver { +class Ec2NameResolver implements CustomNameResolver { + + private static final Logger logger = LogManager.getLogger(Ec2NameResolver.class); /** * enum that can be added to over time with more meta-data types (such as ipv6 when this is available) diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java index 116bf1842d065..e765a66486b03 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java @@ -19,14 +19,6 @@ package org.elasticsearch.cloud.gce; -import java.io.IOException; -import java.security.GeneralSecurityException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.function.Function; - import com.google.api.client.googleapis.compute.ComputeCredential; import com.google.api.client.googleapis.javanet.GoogleNetHttpTransport; import com.google.api.client.http.GenericUrl; @@ -41,17 +33,29 @@ import com.google.api.services.compute.Compute; import com.google.api.services.compute.model.Instance; import com.google.api.services.compute.model.InstanceList; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cloud.gce.util.Access; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.gce.RetryHttpInitializerWrapper; -public class GceInstancesServiceImpl extends AbstractComponent implements GceInstancesService { +import java.io.IOException; +import java.security.GeneralSecurityException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.function.Function; + +public class GceInstancesServiceImpl implements GceInstancesService { + + private static final Logger logger = LogManager.getLogger(GceInstancesServiceImpl.class); // all settings just used for testing - not registered by default public static final Setting GCE_VALIDATE_CERTIFICATES = diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java 
b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java index 2d1bb07b23909..a593faabcf622 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java @@ -19,21 +19,16 @@ package org.elasticsearch.discovery.gce; -import java.io.IOException; -import java.net.InetAddress; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.function.Function; - import com.google.api.services.compute.model.AccessConfig; import com.google.api.services.compute.model.Instance; import com.google.api.services.compute.model.NetworkInterface; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cloud.gce.GceInstancesService; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Setting; @@ -44,9 +39,18 @@ import org.elasticsearch.discovery.zen.UnicastHostsProvider; import org.elasticsearch.transport.TransportService; +import java.io.IOException; +import java.net.InetAddress; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.function.Function; + import static java.util.Collections.emptyList; -public class GceUnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider { +public class GceUnicastHostsProvider implements UnicastHostsProvider { + + private static final Logger logger = LogManager.getLogger(GceUnicastHostsProvider.class); /** * discovery.gce.tags: The gce discovery can filter machines to include in the cluster based on tags. 
diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java index 6268a3372843e..654836ea0fbef 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java @@ -20,14 +20,17 @@ package org.elasticsearch.repositories.azure; import com.microsoft.azure.storage.LocationMode; - import com.microsoft.azure.storage.StorageException; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; -import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.repositories.azure.AzureRepository.Repository; + import java.io.IOException; import java.io.InputStream; import java.net.URISyntaxException; @@ -36,9 +39,9 @@ import static java.util.Collections.emptyMap; -import static org.elasticsearch.repositories.azure.AzureRepository.Repository; - -public class AzureBlobStore extends AbstractComponent implements BlobStore { +public class AzureBlobStore implements BlobStore { + + private static final Logger logger = LogManager.getLogger(AzureBlobStore.class); private final AzureStorageService service; diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java index ec461cf38f294..79e8d4c6235ae 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java @@ -34,12 +34,14 @@ import com.microsoft.azure.storage.blob.CloudBlockBlob; import com.microsoft.azure.storage.blob.DeleteSnapshotsOption; import com.microsoft.azure.storage.blob.ListBlobItem; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.unit.ByteSizeUnit; @@ -58,7 +60,9 @@ import static java.util.Collections.emptyMap; -public class AzureStorageService extends AbstractComponent { +public class AzureStorageService { + + private static final Logger logger = LogManager.getLogger(AzureStorageService.class); public static final ByteSizeValue MIN_CHUNK_SIZE = new ByteSizeValue(1, ByteSizeUnit.BYTES); /** diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java index 7894f9fc7df63..84184660159a4 100644 --- 
a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java @@ -28,6 +28,9 @@ import com.google.cloud.storage.Storage; import com.google.cloud.storage.Storage.BlobListOption; import com.google.cloud.storage.StorageException; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; @@ -36,7 +39,6 @@ import org.elasticsearch.common.blobstore.BlobStoreException; import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; import org.elasticsearch.common.collect.MapBuilder; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.core.internal.io.Streams; import java.io.ByteArrayOutputStream; @@ -56,7 +58,9 @@ import static java.net.HttpURLConnection.HTTP_NOT_FOUND; import static java.net.HttpURLConnection.HTTP_PRECON_FAILED; -class GoogleCloudStorageBlobStore extends AbstractComponent implements BlobStore { +class GoogleCloudStorageBlobStore implements BlobStore { + + private static final Logger logger = LogManager.getLogger(GoogleCloudStorageBlobStore.class); // The recommended maximum size of a blob that should be uploaded in a single // request. Larger files should be uploaded over multiple requests (this is diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java index b38957651df10..d2e5b89b040e1 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java @@ -28,10 +28,11 @@ import com.google.cloud.storage.Storage; import com.google.cloud.storage.StorageOptions; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.MapBuilder; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.LazyInitializable; @@ -45,7 +46,9 @@ import static java.util.Collections.emptyMap; -public class GoogleCloudStorageService extends AbstractComponent { +public class GoogleCloudStorageService { + + private static final Logger logger = LogManager.getLogger(GoogleCloudStorageService.class); /** * Dictionary of client instances. 
Client instances are built lazily from the diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java index e1ffc7a22d44c..a43df0d2a06bb 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java @@ -27,18 +27,18 @@ import com.amazonaws.services.s3.model.ObjectListing; import com.amazonaws.services.s3.model.S3ObjectSummary; import com.amazonaws.services.s3.model.StorageClass; + import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.blobstore.BlobStoreException; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.unit.ByteSizeValue; import java.io.IOException; import java.util.ArrayList; import java.util.Locale; -class S3BlobStore extends AbstractComponent implements BlobStore { +class S3BlobStore implements BlobStore { private final S3Service service; diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java index 837f2ea34c752..a5ee861d0c38b 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java @@ -29,10 +29,11 @@ import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.amazonaws.services.s3.internal.Constants; + +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.MapBuilder; -import org.elasticsearch.common.component.AbstractComponent; import java.io.Closeable; import java.io.IOException; @@ -41,7 +42,9 @@ import static java.util.Collections.emptyMap; -class S3Service extends AbstractComponent implements Closeable { +class S3Service implements Closeable { + + private static final Logger logger = LogManager.getLogger(S3Service.class); private volatile Map clientsCache = emptyMap(); private volatile Map clientsSettings = emptyMap(); diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java index ec5d5578a03a2..6288cc181aac4 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java @@ -26,6 +26,9 @@ import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.model.HeadBucketRequest; import com.amazonaws.services.s3.model.HeadBucketResult; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.settings.MockSecureSettings; @@ -68,6 +71,9 @@ public HeadBucketResult headBucket(HeadBucketRequest headBucketRequest) throws A } static final class ProxyS3Service extends S3Service { + + private static final Logger logger = 
LogManager.getLogger(ProxyS3Service.class); + @Override AmazonS3 buildClient(final S3ClientSettings clientSettings) { final AmazonS3 client = super.buildClient(clientSettings); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/count/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/count/10_basic.yml index 32256811e0f51..a8ef8c01e51a9 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/count/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/count/10_basic.yml @@ -18,7 +18,6 @@ setup: - do: count: index: test - type: test body: query: match: @@ -29,7 +28,6 @@ setup: - do: count: index: test - type: test body: query: match: @@ -43,7 +41,6 @@ setup: - do: count: index: test - type: test body: { } - match: {count : 1} @@ -51,7 +48,6 @@ setup: - do: count: index: test - type: test - match: {count : 1} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/count/11_basic_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/count/11_basic_with_types.yml new file mode 100644 index 0000000000000..48cfc610b435e --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/count/11_basic_with_types.yml @@ -0,0 +1,66 @@ +setup: + - do: + indices.create: + index: test + - do: + index: + index: test + type: test + id: 1 + body: { foo: bar } + + - do: + indices.refresh: + index: [test] + +--- +"count with body": + - do: + count: + index: test + type: test + body: + query: + match: + foo: bar + + - match: {count : 1} + + - do: + count: + index: test + body: + query: + match: + foo: test + + - match: {count : 0} + +--- +"count with empty body": +# empty body should default to match_all query + - do: + count: + index: test + type: test + body: { } + + - match: {count : 1} + + - do: + count: + index: test + type: test + + - match: {count : 1} + +--- +"count body without query element": + - do: + catch: bad_request + count: + index: test + type: test + body: + match: + foo: bar diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yml index a14423cef1154..c16844478f1fa 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yml @@ -50,7 +50,7 @@ setup: - index: index_3 - query: match_all: {} - - type: test + - {} - query: match_all: {} @@ -82,7 +82,7 @@ setup: - index: index_3 - query: match_all: {} - - type: test + - {} - query: match_all: {} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/12_basic_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/12_basic_with_types.yml new file mode 100644 index 0000000000000..a14423cef1154 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/12_basic_with_types.yml @@ -0,0 +1,95 @@ +--- +setup: + + - do: + index: + index: index_1 + type: test + id: 1 + body: { foo: bar } + + - do: + index: + index: index_1 + type: test + id: 2 + body: { foo: baz } + + - do: + index: + index: index_1 + type: test + id: 3 + body: { foo: foo } + + - do: + index: + index: index_2 + type: test + id: 1 + body: { foo: foo } + + - do: + indices.refresh: {} + +--- +"Basic multi-search": + + - do: + msearch: + body: + - index: index_* + - query: + match: {foo: foo} + - index: index_2 + - query: + match_all: {} + - index: index_1 + - query: + match: {foo: foo} + - index: index_3 + - query: + match_all: {} + - type: 
test + - query: + match_all: {} + + - match: { responses.0.hits.total: 2 } + - match: { responses.1.hits.total: 1 } + - match: { responses.2.hits.total: 1 } + - match: { responses.3.error.root_cause.0.type: index_not_found_exception } + - match: { responses.3.error.root_cause.0.reason: "/no.such.index/" } + - match: { responses.3.error.root_cause.0.index: index_3 } + - match: { responses.4.hits.total: 4 } + +--- +"Least impact smoke test": +# only passing these parameters to make sure they are consumed + - do: + msearch: + max_concurrent_shard_requests: 1 + max_concurrent_searches: 1 + body: + - index: index_* + - query: + match: {foo: foo} + - index: index_2 + - query: + match_all: {} + - index: index_1 + - query: + match: {foo: foo} + - index: index_3 + - query: + match_all: {} + - type: test + - query: + match_all: {} + + - match: { responses.0.hits.total: 2 } + - match: { responses.1.hits.total: 1 } + - match: { responses.2.hits.total: 1 } + - match: { responses.3.error.root_cause.0.type: index_not_found_exception } + - match: { responses.3.error.root_cause.0.reason: "/no.such.index/" } + - match: { responses.3.error.root_cause.0.index: index_3 } + - match: { responses.4.hits.total: 4 } diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 854c4aee00ba2..5d0ef95bd7cdd 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -105,10 +105,10 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_6_4_2 = new Version(V_6_4_2_ID, org.apache.lucene.util.Version.LUCENE_7_4_0); public static final int V_6_4_3_ID = 6040399; public static final Version V_6_4_3 = new Version(V_6_4_3_ID, org.apache.lucene.util.Version.LUCENE_7_4_0); - public static final int V_6_4_4_ID = 6040499; - public static final Version V_6_4_4 = new Version(V_6_4_4_ID, org.apache.lucene.util.Version.LUCENE_7_4_0); public static final int V_6_5_0_ID = 6050099; public static final Version V_6_5_0 = new Version(V_6_5_0_ID, org.apache.lucene.util.Version.LUCENE_7_5_0); + public static final int V_6_5_1_ID = 6050199; + public static final Version V_6_5_1 = new Version(V_6_5_1_ID, org.apache.lucene.util.Version.LUCENE_7_5_0); public static final int V_6_6_0_ID = 6060099; public static final Version V_6_6_0 = new Version(V_6_6_0_ID, org.apache.lucene.util.Version.LUCENE_7_6_0); public static final int V_7_0_0_ID = 7000099; @@ -131,10 +131,10 @@ public static Version fromId(int id) { return V_7_0_0; case V_6_6_0_ID: return V_6_6_0; + case V_6_5_1_ID: + return V_6_5_1; case V_6_5_0_ID: return V_6_5_0; - case V_6_4_4_ID: - return V_6_4_4; case V_6_4_3_ID: return V_6_4_3; case V_6_4_2_ID: diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 4e589f613a6fb..71eddc65c18a4 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -310,6 +310,7 @@ import org.elasticsearch.rest.action.ingest.RestPutPipelineAction; import org.elasticsearch.rest.action.ingest.RestSimulatePipelineAction; import org.elasticsearch.rest.action.search.RestClearScrollAction; +import org.elasticsearch.rest.action.search.RestCountAction; import org.elasticsearch.rest.action.search.RestExplainAction; import org.elasticsearch.rest.action.search.RestMultiSearchAction; import 
org.elasticsearch.rest.action.search.RestSearchAction; @@ -595,7 +596,7 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestGetSourceAction(settings, restController)); registerHandler.accept(new RestMultiGetAction(settings, restController)); registerHandler.accept(new RestDeleteAction(settings, restController)); - registerHandler.accept(new org.elasticsearch.rest.action.document.RestCountAction(settings, restController)); + registerHandler.accept(new RestCountAction(settings, restController)); registerHandler.accept(new RestTermVectorsAction(settings, restController)); registerHandler.accept(new RestMultiTermVectorsAction(settings, restController)); registerHandler.accept(new RestBulkAction(settings, restController)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java index 1e8d2adbff829..0caae77d7dead 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -68,7 +68,8 @@ protected String executor() { @Override protected ClusterBlockException checkBlock(ClusterHealthRequest request, ClusterState state) { - return null; // we want users to be able to call this even when there are global blocks, just to check the health (are there blocks?) + // we want users to be able to call this even when there are global blocks, just to check the health (are there blocks?) + return null; } @Override @@ -77,64 +78,69 @@ protected ClusterHealthResponse newResponse() { } @Override - protected final void masterOperation(ClusterHealthRequest request, ClusterState state, ActionListener listener) throws Exception { + protected final void masterOperation(ClusterHealthRequest request, ClusterState state, + ActionListener listener) throws Exception { logger.warn("attempt to execute a cluster health operation without a task"); throw new UnsupportedOperationException("task parameter is required for this operation"); } @Override - protected void masterOperation(Task task, final ClusterHealthRequest request, final ClusterState unusedState, final ActionListener listener) { + protected void masterOperation(Task task, final ClusterHealthRequest request, final ClusterState unusedState, + final ActionListener listener) { if (request.waitForEvents() != null) { final long endTimeMS = TimeValue.nsecToMSec(System.nanoTime()) + request.timeout().millis(); if (request.local()) { - clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", new LocalClusterUpdateTask(request.waitForEvents()) { - @Override - public ClusterTasksResult execute(ClusterState currentState) { - return unchanged(); - } + clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", + new LocalClusterUpdateTask(request.waitForEvents()) { + @Override + public ClusterTasksResult execute(ClusterState currentState) { + return unchanged(); + } - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - final long timeoutInMillis = Math.max(0, endTimeMS - TimeValue.nsecToMSec(System.nanoTime())); - final TimeValue newTimeout = TimeValue.timeValueMillis(timeoutInMillis); - request.timeout(newTimeout); - executeHealth(request, listener); - } + @Override + 
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + final long timeoutInMillis = Math.max(0, endTimeMS - TimeValue.nsecToMSec(System.nanoTime())); + final TimeValue newTimeout = TimeValue.timeValueMillis(timeoutInMillis); + request.timeout(newTimeout); + executeHealth(request, listener); + } - @Override - public void onFailure(String source, Exception e) { - logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e); - listener.onFailure(e); - } - }); + @Override + public void onFailure(String source, Exception e) { + logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e); + listener.onFailure(e); + } + }); } else { - clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", new ClusterStateUpdateTask(request.waitForEvents()) { - @Override - public ClusterState execute(ClusterState currentState) { - return currentState; - } + clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", + new ClusterStateUpdateTask(request.waitForEvents()) { + @Override + public ClusterState execute(ClusterState currentState) { + return currentState; + } - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - final long timeoutInMillis = Math.max(0, endTimeMS - TimeValue.nsecToMSec(System.nanoTime())); - final TimeValue newTimeout = TimeValue.timeValueMillis(timeoutInMillis); - request.timeout(newTimeout); - executeHealth(request, listener); - } + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + final long timeoutInMillis = Math.max(0, endTimeMS - TimeValue.nsecToMSec(System.nanoTime())); + final TimeValue newTimeout = TimeValue.timeValueMillis(timeoutInMillis); + request.timeout(newTimeout); + executeHealth(request, listener); + } - @Override - public void onNoLongerMaster(String source) { - logger.trace("stopped being master while waiting for events with priority [{}]. retrying.", request.waitForEvents()); - // TransportMasterNodeAction implements the retry logic, which is triggered by passing a NotMasterException - listener.onFailure(new NotMasterException("no longer master. source: [" + source + "]")); - } + @Override + public void onNoLongerMaster(String source) { + logger.trace("stopped being master while waiting for events with priority [{}]. retrying.", + request.waitForEvents()); + // TransportMasterNodeAction implements the retry logic, which is triggered by passing a NotMasterException + listener.onFailure(new NotMasterException("no longer master. 
source: [" + source + "]")); + } - @Override - public void onFailure(String source, Exception e) { - logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e); - listener.onFailure(e); - } - }); + @Override + public void onFailure(String source, Exception e) { + logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e); + listener.onFailure(e); + } + }); } } else { executeHealth(request, listener); @@ -164,7 +170,8 @@ private void executeHealth(final ClusterHealthRequest request, final ActionListe } final ClusterState state = clusterService.state(); - final ClusterStateObserver observer = new ClusterStateObserver(state, clusterService, null, logger, threadPool.getThreadContext()); + final ClusterStateObserver observer = new ClusterStateObserver(state, clusterService, + null, logger, threadPool.getThreadContext()); if (request.timeout().millis() == 0) { listener.onResponse(getResponse(request, state, waitFor, request.timeout().millis() == 0)); return; @@ -203,7 +210,8 @@ private boolean validateRequest(final ClusterHealthRequest request, ClusterState return readyCounter == waitFor; } - private ClusterHealthResponse getResponse(final ClusterHealthRequest request, ClusterState clusterState, final int waitFor, boolean timedOut) { + private ClusterHealthResponse getResponse(final ClusterHealthRequest request, ClusterState clusterState, + final int waitFor, boolean timedOut) { ClusterHealthResponse response = clusterHealth(request, clusterState, clusterService.getMasterService().numberOfPendingTasks(), gatewayAllocator.getNumberOfInFlightFetch(), clusterService.getMasterService().getMaxTaskWaitTime()); int readyCounter = prepareResponse(request, response, clusterState, indexNameExpressionResolver); @@ -305,8 +313,8 @@ static int prepareResponse(final ClusterHealthRequest request, final ClusterHeal } - private ClusterHealthResponse clusterHealth(ClusterHealthRequest request, ClusterState clusterState, int numberOfPendingTasks, int numberOfInFlightFetch, - TimeValue pendingTaskTimeInQueue) { + private ClusterHealthResponse clusterHealth(ClusterHealthRequest request, ClusterState clusterState, int numberOfPendingTasks, + int numberOfInFlightFetch, TimeValue pendingTaskTimeInQueue) { if (logger.isTraceEnabled()) { logger.trace("Calculating health based on state version [{}]", clusterState.version()); } @@ -316,9 +324,9 @@ private ClusterHealthResponse clusterHealth(ClusterHealthRequest request, Cluste concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request); } catch (IndexNotFoundException e) { // one of the specified indices is not there - treat it as RED. 
- ClusterHealthResponse response = new ClusterHealthResponse(clusterState.getClusterName().value(), Strings.EMPTY_ARRAY, clusterState, - numberOfPendingTasks, numberOfInFlightFetch, UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), - pendingTaskTimeInQueue); + ClusterHealthResponse response = new ClusterHealthResponse(clusterState.getClusterName().value(), Strings.EMPTY_ARRAY, + clusterState, numberOfPendingTasks, numberOfInFlightFetch, UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), + pendingTaskTimeInQueue); response.setStatus(ClusterHealthStatus.RED); return response; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java index 6347a027f4f89..cdef2a03b534c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java @@ -41,7 +41,8 @@ public class ClusterStatsNodeResponse extends BaseNodeResponse { ClusterStatsNodeResponse() { } - public ClusterStatsNodeResponse(DiscoveryNode node, @Nullable ClusterHealthStatus clusterStatus, NodeInfo nodeInfo, NodeStats nodeStats, ShardStats[] shardsStats) { + public ClusterStatsNodeResponse(DiscoveryNode node, @Nullable ClusterHealthStatus clusterStatus, + NodeInfo nodeInfo, NodeStats nodeStats, ShardStats[] shardsStats) { super(node); this.nodeInfo = nodeInfo; this.nodeStats = nodeStats; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java index a30c9ba846107..4f993e40d0b8b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java @@ -87,14 +87,22 @@ public ValidateQueryRequest query(QueryBuilder query) { /** * The types of documents the query will run against. Defaults to all types. + * + * @deprecated Types are in the process of being removed. Instead of using a type, prefer to + * filter on a field on the document. */ + @Deprecated public String[] types() { return this.types; } /** * The types of documents the query will run against. Defaults to all types. + * + * @deprecated Types are in the process of being removed. Instead of using a type, prefer to + * filter on a field on the document. */ + @Deprecated public ValidateQueryRequest types(String... 
types) { this.types = types; return this; diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineResponse.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineResponse.java index 991e81a14553b..7a3eaea88c099 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineResponse.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineResponse.java @@ -67,7 +67,7 @@ public class SimulatePipelineResponse extends ActionResponse implements ToXConte ensureExpectedToken(Token.START_OBJECT, token, parser::getTokenLocation); SimulateDocumentResult result = null; while ((token = parser.nextToken()) != Token.END_OBJECT) { - ensureExpectedToken(token, Token.FIELD_NAME, parser::getTokenLocation); + ensureExpectedToken(Token.FIELD_NAME, token, parser::getTokenLocation); String fieldName = parser.currentName(); token = parser.nextToken(); if (token == Token.START_ARRAY) { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index 2c24d2852217e..01ef94c428a41 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -171,7 +171,8 @@ public SortedTopDocs sortDocs(boolean ignoreFrom, Collection 0) { // make sure we set the shard index before we add it - the consumer didn't do that yet + // make sure we set the shard index before we add it - the consumer didn't do that yet + if (td.topDocs.scoreDocs.length > 0) { setShardIndex(td.topDocs, queryResult.getShardIndex()); topDocs.add(td.topDocs); } @@ -308,7 +309,8 @@ public IntArrayList[] fillDocIdsToLoad(int numShards, ScoreDoc[] shardDocs) { * completion suggestion ordered by suggestion name */ public InternalSearchResponse merge(boolean ignoreFrom, ReducedQueryPhase reducedQueryPhase, - Collection fetchResults, IntFunction resultsLookup) { + Collection fetchResults, + IntFunction resultsLookup) { if (reducedQueryPhase.isEmptyResult) { return InternalSearchResponse.empty(); } @@ -416,7 +418,8 @@ public ReducedQueryPhase reducedQueryPhase(Collection queryResults, boolean isScrollRequest, boolean trackTotalHits) { + public ReducedQueryPhase reducedQueryPhase(Collection queryResults, + boolean isScrollRequest, boolean trackTotalHits) { return reducedQueryPhase(queryResults, null, new ArrayList<>(), new TopDocsStats(trackTotalHits), 0, isScrollRequest); } @@ -441,7 +444,8 @@ private ReducedQueryPhase reducedQueryPhase(Collection { @@ -340,15 +310,6 @@ protected void doRun() throws Exception { @Override public void onResponse(PrimaryShardReference primaryShardReference) { try { - final ClusterState clusterState = clusterService.state(); - final IndexMetaData indexMetaData = clusterState.metaData().getIndexSafe(primaryShardReference.routingEntry().index()); - - final ClusterBlockException blockException = blockExceptions(clusterState, indexMetaData.getIndex().getName()); - if (blockException != null) { - logger.trace("cluster is blocked, action failed on primary", blockException); - throw blockException; - } - if (primaryShardReference.isRelocated()) { primaryShardReference.close(); // release shard operation lock as soon as possible setPhase(replicationTask, "primary_delegation"); @@ -362,7 +323,7 @@ public void onResponse(PrimaryShardReference primaryShardReference) { response.readFrom(in); return response; }; - DiscoveryNode 
relocatingNode = clusterState.nodes().get(primary.relocatingNodeId()); + DiscoveryNode relocatingNode = clusterService.state().nodes().get(primary.relocatingNodeId()); transportService.sendRequest(relocatingNode, transportPrimaryAction, new ConcreteShardRequest<>(request, primary.allocationId().getRelocationId(), primaryTerm), transportOptions, @@ -735,42 +696,35 @@ public void onFailure(Exception e) { protected void doRun() { setPhase(task, "routing"); final ClusterState state = observer.setAndGetObservedState(); - final String concreteIndex = concreteIndex(state, request); - final ClusterBlockException blockException = blockExceptions(state, concreteIndex); - if (blockException != null) { - if (blockException.retryable()) { - logger.trace("cluster is blocked, scheduling a retry", blockException); - retry(blockException); - } else { - finishAsFailed(blockException); - } - } else { - // request does not have a shardId yet, we need to pass the concrete index to resolve shardId - final IndexMetaData indexMetaData = state.metaData().index(concreteIndex); - if (indexMetaData == null) { - retry(new IndexNotFoundException(concreteIndex)); - return; - } - if (indexMetaData.getState() == IndexMetaData.State.CLOSE) { - throw new IndexClosedException(indexMetaData.getIndex()); - } + if (handleBlockExceptions(state)) { + return; + } + + // request does not have a shardId yet, we need to pass the concrete index to resolve shardId + final String concreteIndex = concreteIndex(state); + final IndexMetaData indexMetaData = state.metaData().index(concreteIndex); + if (indexMetaData == null) { + retry(new IndexNotFoundException(concreteIndex)); + return; + } + if (indexMetaData.getState() == IndexMetaData.State.CLOSE) { + throw new IndexClosedException(indexMetaData.getIndex()); + } - // resolve all derived request fields, so we can route and apply it - resolveRequest(indexMetaData, request); - assert request.shardId() != null : "request shardId must be set in resolveRequest"; - assert request.waitForActiveShards() != ActiveShardCount.DEFAULT : - "request waitForActiveShards must be set in resolveRequest"; + // resolve all derived request fields, so we can route and apply it + resolveRequest(indexMetaData, request); + assert request.shardId() != null : "request shardId must be set in resolveRequest"; + assert request.waitForActiveShards() != ActiveShardCount.DEFAULT : "request waitForActiveShards must be set in resolveRequest"; - final ShardRouting primary = primary(state); - if (retryIfUnavailable(state, primary)) { - return; - } - final DiscoveryNode node = state.nodes().get(primary.currentNodeId()); - if (primary.currentNodeId().equals(state.nodes().getLocalNodeId())) { - performLocalAction(state, primary, node, indexMetaData); - } else { - performRemoteAction(state, primary, node); - } + final ShardRouting primary = primary(state); + if (retryIfUnavailable(state, primary)) { + return; + } + final DiscoveryNode node = state.nodes().get(primary.currentNodeId()); + if (primary.currentNodeId().equals(state.nodes().getLocalNodeId())) { + performLocalAction(state, primary, node, indexMetaData); + } else { + performRemoteAction(state, primary, node); } } @@ -822,11 +776,44 @@ private boolean retryIfUnavailable(ClusterState state, ShardRouting primary) { return false; } + private String concreteIndex(ClusterState state) { + return resolveIndex() ? 
indexNameExpressionResolver.concreteSingleIndex(state, request).getName() : request.index(); + } + private ShardRouting primary(ClusterState state) { IndexShardRoutingTable indexShard = state.getRoutingTable().shardRoutingTable(request.shardId()); return indexShard.primaryShard(); } + private boolean handleBlockExceptions(ClusterState state) { + ClusterBlockLevel globalBlockLevel = globalBlockLevel(); + if (globalBlockLevel != null) { + ClusterBlockException blockException = state.blocks().globalBlockedException(globalBlockLevel); + if (blockException != null) { + handleBlockException(blockException); + return true; + } + } + ClusterBlockLevel indexBlockLevel = indexBlockLevel(); + if (indexBlockLevel != null) { + ClusterBlockException blockException = state.blocks().indexBlockedException(indexBlockLevel, concreteIndex(state)); + if (blockException != null) { + handleBlockException(blockException); + return true; + } + } + return false; + } + + private void handleBlockException(ClusterBlockException blockException) { + if (blockException.retryable()) { + logger.trace("cluster is blocked, scheduling a retry", blockException); + retry(blockException); + } else { + finishAsFailed(blockException); + } + } + private void performAction(final DiscoveryNode node, final String action, final boolean isPrimaryAction, final TransportRequest requestToPerform) { transportService.sendRequest(node, action, requestToPerform, transportOptions, new TransportResponseHandler<Response>() { diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java index fda393e375c9e..938489d6cbedf 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java @@ -38,7 +38,8 @@ import java.util.List; import java.util.Set; -public class MultiTermVectorsRequest extends ActionRequest implements Iterable<TermVectorsRequest>, CompositeIndicesRequest, RealtimeRequest { +public class MultiTermVectorsRequest extends ActionRequest + implements Iterable<TermVectorsRequest>, CompositeIndicesRequest, RealtimeRequest { String preference; List<TermVectorsRequest> requests = new ArrayList<>(); diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java index 4cd02caf91c32..dc849ca3d1334 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java @@ -616,18 +616,21 @@ public static void parseRequest(TermVectorsRequest termVectorsRequest, XContentP termVectorsRequest.perFieldAnalyzer(readPerFieldAnalyzer(parser.map())); } else if (FILTER.match(currentFieldName, parser.getDeprecationHandler())) { termVectorsRequest.filterSettings(readFilterSettings(parser));
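Stepping back to the TransportReplicationAction hunk above: handleBlockException reduces to a single decision, retry on a retryable block, fail the request otherwise. A self-contained sketch of that decision (local stand-in types, not the actual Elasticsearch classes):

    import java.util.function.Consumer;

    // Minimal model of the retry-vs-fail choice made by handleBlockException(...)
    // in the hunk above; ClusterBlockException here is a local stand-in class.
    final class BlockHandlingSketch {
        static final class ClusterBlockException extends RuntimeException {
            private final boolean retryable;
            ClusterBlockException(String message, boolean retryable) {
                super(message);
                this.retryable = retryable;
            }
            boolean retryable() {
                return retryable;
            }
        }

        static void handleBlockException(ClusterBlockException e, Runnable retry, Consumer<Exception> finishAsFailed) {
            if (e.retryable()) {
                retry.run(); // e.g. a transient no-master block: wait for a new cluster state
            } else {
                finishAsFailed.accept(e); // e.g. a write to a read-only index: fail immediately
            }
        }

        public static void main(String[] args) {
            handleBlockException(new ClusterBlockException("no master", true),
                () -> System.out.println("retrying"),
                e -> System.out.println("failed: " + e.getMessage()));
            handleBlockException(new ClusterBlockException("index read-only", false),
                () -> System.out.println("retrying"),
                e -> System.out.println("failed: " + e.getMessage()));
        }
    }

The null-returning globalBlockLevel()/indexBlockLevel() hooks in the hunk give subclasses a way to skip either block check entirely.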
- } else if (INDEX.match(currentFieldName, parser.getDeprecationHandler())) { // the following is important for multi request parsing. + } else if (INDEX.match(currentFieldName, parser.getDeprecationHandler())) { + // the following is important for multi request parsing. termVectorsRequest.index = parser.text(); } else if (TYPE.match(currentFieldName, parser.getDeprecationHandler())) { termVectorsRequest.type = parser.text(); } else if (ID.match(currentFieldName, parser.getDeprecationHandler())) { if (termVectorsRequest.doc != null) { - throw new ElasticsearchParseException("failed to parse term vectors request. either [id] or [doc] can be specified, but not both!"); + throw new ElasticsearchParseException("failed to parse term vectors request. " + + "either [id] or [doc] can be specified, but not both!"); } termVectorsRequest.id = parser.text(); } else if (DOC.match(currentFieldName, parser.getDeprecationHandler())) { if (termVectorsRequest.id != null) { - throw new ElasticsearchParseException("failed to parse term vectors request. either [id] or [doc] can be specified, but not both!"); + throw new ElasticsearchParseException("failed to parse term vectors request. " + + "either [id] or [doc] can be specified, but not both!"); } termVectorsRequest.doc(jsonBuilder().copyCurrentStructure(parser)); } else if (ROUTING.match(currentFieldName, parser.getDeprecationHandler())) { @@ -653,7 +656,8 @@ public static Map<String, String> readPerFieldAnalyzer(Map<String, Object> map) if (e.getValue() instanceof String) { mapStrStr.put(e.getKey(), (String) e.getValue()); } else { - throw new ElasticsearchParseException("expecting the analyzer at [{}] to be a String, but found [{}] instead", e.getKey(), e.getValue().getClass()); + throw new ElasticsearchParseException("expecting the analyzer at [{}] to be a String, but found [{}] instead", + e.getKey(), e.getValue().getClass()); } } return mapStrStr; } @@ -682,7 +686,8 @@ private static FilterSettings readFilterSettings(XContentParser parser) throws I } else if (currentFieldName.equals("max_word_length")) { settings.maxWordLength = parser.intValue(); } else { - throw new ElasticsearchParseException("failed to parse term vectors request. the field [{}] is not valid for filter parameter for term vector request", currentFieldName); + throw new ElasticsearchParseException("failed to parse term vectors request. " + + "the field [{}] is not valid for filter parameter for term vector request", currentFieldName); } } } diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java index 01a9812516bf7..9159a07e83c03 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java @@ -197,7 +197,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - private void buildField(XContentBuilder builder, final CharsRefBuilder spare, Fields theFields, Iterator<String> fieldIter) throws IOException { + private void buildField(XContentBuilder builder, final CharsRefBuilder spare, + Fields theFields, Iterator<String> fieldIter) throws IOException { String fieldName = fieldIter.next(); builder.startObject(fieldName); Terms curTerms = theFields.terms(fieldName); @@ -213,7 +214,8 @@ private void buildField(XContentBuilder builder, final CharsRefBuilder spare, Fi builder.endObject(); } - private void buildTerm(XContentBuilder builder, final CharsRefBuilder spare, Terms curTerms, TermsEnum termIter, BoostAttribute boostAtt) throws IOException { + private void buildTerm(XContentBuilder builder, final CharsRefBuilder spare, Terms curTerms, + TermsEnum termIter, BoostAttribute boostAtt) throws IOException { // start term, optimized writing BytesRef term = termIter.next(); spare.copyUTF8Bytes(term); @@ -235,7 +237,8 @@ private void buildTermStatistics(XContentBuilder builder, TermsEnum termIter) th // boolean that says if these values actually were requested. // However, we can assume that they were not if the statistic values are // <= 0.
- assert (((termIter.docFreq() > 0) && (termIter.totalTermFreq() > 0)) || ((termIter.docFreq() == -1) && (termIter.totalTermFreq() == -1))); + assert (((termIter.docFreq() > 0) && (termIter.totalTermFreq() > 0)) || + ((termIter.docFreq() == -1) && (termIter.totalTermFreq() == -1))); int docFreq = termIter.docFreq(); if (docFreq > 0) { builder.field(FieldStrings.DOC_FREQ, docFreq); @@ -349,12 +352,13 @@ public void setExists(boolean exists) { this.exists = exists; } - public void setFields(Fields termVectorsByField, Set<String> selectedFields, EnumSet<Flag> flags, Fields topLevelFields) throws IOException { + public void setFields(Fields termVectorsByField, Set<String> selectedFields, + EnumSet<Flag> flags, Fields topLevelFields) throws IOException { setFields(termVectorsByField, selectedFields, flags, topLevelFields, null, null); } - public void setFields(Fields termVectorsByField, Set<String> selectedFields, EnumSet<Flag> flags, Fields topLevelFields, @Nullable AggregatedDfs dfs, - TermVectorsFilter termVectorsFilter) throws IOException { + public void setFields(Fields termVectorsByField, Set<String> selectedFields, EnumSet<Flag> flags, + Fields topLevelFields, @Nullable AggregatedDfs dfs, TermVectorsFilter termVectorsFilter) throws IOException { TermVectorsWriter tvw = new TermVectorsWriter(this); if (termVectorsByField != null) { diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsWriter.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsWriter.java index 9aca80b533f66..d38a980c58979 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsWriter.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsWriter.java @@ -141,10 +141,12 @@ void setFields(Fields termVectorsByField, Set<String> selectedFields, EnumSet<Flag> diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsAction.java b/server/src/main/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsAction.java Map<ShardId, MultiTermVectorsShardRequest> shardRequests = new HashMap<>(); for (int i = 0; i < request.requests.size(); i++) { TermVectorsRequest termVectorsRequest = request.requests.get(i); - termVectorsRequest.routing(clusterState.metaData().resolveIndexRouting(termVectorsRequest.routing(), termVectorsRequest.index())); + termVectorsRequest.routing(clusterState.metaData().resolveIndexRouting(termVectorsRequest.routing(), + termVectorsRequest.index())); if (!clusterState.metaData().hasConcreteIndex(termVectorsRequest.index())) { responses.set(i, new MultiTermVectorsItemResponse(null, new MultiTermVectorsResponse.Failure(termVectorsRequest.index(), termVectorsRequest.type(), termVectorsRequest.id(), new IndexNotFoundException(termVectorsRequest.index())))); continue; } String concreteSingleIndex = indexNameExpressionResolver.concreteSingleIndex(clusterState, termVectorsRequest).getName(); - if (termVectorsRequest.routing() == null && clusterState.getMetaData().routingRequired(concreteSingleIndex, termVectorsRequest.type())) { - responses.set(i, new MultiTermVectorsItemResponse(null, new MultiTermVectorsResponse.Failure(concreteSingleIndex, termVectorsRequest.type(), termVectorsRequest.id(), - new IllegalArgumentException("routing is required for [" + concreteSingleIndex + "]/[" + termVectorsRequest.type() + "]/[" + termVectorsRequest.id() + "]")))); + if (termVectorsRequest.routing() == null && + clusterState.getMetaData().routingRequired(concreteSingleIndex, termVectorsRequest.type())) { + responses.set(i, new MultiTermVectorsItemResponse(null, + new MultiTermVectorsResponse.Failure(concreteSingleIndex, termVectorsRequest.type(), termVectorsRequest.id(), + new IllegalArgumentException("routing is required for [" + concreteSingleIndex + "]/[" + + termVectorsRequest.type() + "]/["
+ termVectorsRequest.id() + "]")))); continue; } ShardId shardId = clusterService.operationRouting().shardId(clusterState, concreteSingleIndex, diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java b/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java index 6796d23eaadcf..e8d6c1bcb4ff6 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java @@ -36,7 +36,8 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -public class TransportShardMultiTermsVectorAction extends TransportSingleShardAction<MultiTermVectorsShardRequest, MultiTermVectorsShardResponse> { +public class TransportShardMultiTermsVectorAction extends + TransportSingleShardAction<MultiTermVectorsShardRequest, MultiTermVectorsShardResponse> { private final IndicesService indicesService; @@ -86,7 +87,8 @@ protected MultiTermVectorsShardResponse shardOperation(MultiTermVectorsShardRequ if (TransportActions.isShardNotAvailableException(e)) { throw e; } else { - logger.debug(() -> new ParameterizedMessage("{} failed to execute multi term vectors for [{}]/[{}]", shardId, termVectorsRequest.type(), termVectorsRequest.id()), e); + logger.debug(() -> new ParameterizedMessage("{} failed to execute multi term vectors for [{}]/[{}]", + shardId, termVectorsRequest.type(), termVectorsRequest.id()), e); response.add(request.locations.get(i), new MultiTermVectorsResponse.Failure(request.index(), termVectorsRequest.type(), termVectorsRequest.id(), e)); } diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TransportTermVectorsAction.java b/server/src/main/java/org/elasticsearch/action/termvectors/TransportTermVectorsAction.java index 49a78275669fc..dcd0fa1b911b9 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TransportTermVectorsAction.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TransportTermVectorsAction.java @@ -85,7 +85,8 @@ protected void resolveRequest(ClusterState state, InternalRequest request) { } @Override - protected void asyncShardOperation(TermVectorsRequest request, ShardId shardId, ActionListener<TermVectorsResponse> listener) throws IOException { + protected void asyncShardOperation(TermVectorsRequest request, ShardId shardId, + ActionListener<TermVectorsResponse> listener) throws IOException { IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); IndexShard indexShard = indexService.getShard(shardId.id()); if (request.realtime()) { // it's a realtime request which is not subject to refresh cycles diff --git a/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java b/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java index 9cf85c1c7732f..8561d106bdf78 100644 --- a/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java +++ b/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java @@ -75,7 +75,8 @@ public TransportUpdateAction(ThreadPool threadPool, ClusterService clusterServic UpdateHelper updateHelper, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, IndicesService indicesService, AutoCreateIndex autoCreateIndex, NodeClient client) { - super(UpdateAction.NAME, threadPool, clusterService, transportService, actionFilters, + indexNameExpressionResolver, UpdateRequest::new); this.updateHelper = updateHelper; this.indicesService = indicesService; this.autoCreateIndex = autoCreateIndex; @@ -114,7 +115,8 @@ public static void resolveAndValidateRouting(MetaData metaData, String concreteI protected void doExecute(Task task, final UpdateRequest request, final ActionListener<UpdateResponse> listener) { // if we don't have a master, we don't have metadata, that's fine, let it find a master using create index API if (autoCreateIndex.shouldAutoCreate(request.index(), clusterService.state())) { - client.admin().indices().create(new CreateIndexRequest().index(request.index()).cause("auto(update api)").masterNodeTimeout(request.timeout()), new ActionListener<CreateIndexResponse>() { + client.admin().indices().create(new CreateIndexRequest().index(request.index()).cause("auto(update api)") + .masterNodeTimeout(request.timeout()), new ActionListener<CreateIndexResponse>() { @Override public void onResponse(CreateIndexResponse result) { innerExecute(task, request, listener); @@ -177,11 +179,14 @@ protected void shardOperation(final UpdateRequest request, final ActionListener< final BytesReference upsertSourceBytes = upsertRequest.source(); client.bulk(toSingleItemBulkRequest(upsertRequest), wrapBulkResponse( ActionListener.wrap(response -> { - UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getSeqNo(), response.getPrimaryTerm(), response.getVersion(), response.getResult()); + UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), + response.getType(), response.getId(), response.getSeqNo(), response.getPrimaryTerm(), + response.getVersion(), response.getResult()); if (request.fetchSource() != null && request.fetchSource().fetchSource()) { Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(upsertSourceBytes, true, upsertRequest.getContentType()); - update.setGetResult(UpdateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), upsertSourceBytes)); + update.setGetResult(UpdateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), + sourceAndContent.v2(), sourceAndContent.v1(), upsertSourceBytes)); } else { update.setGetResult(null); } @@ -197,8 +202,11 @@ protected void shardOperation(final UpdateRequest request, final ActionListener< final BytesReference indexSourceBytes = indexRequest.source(); client.bulk(toSingleItemBulkRequest(indexRequest), wrapBulkResponse( ActionListener.wrap(response -> { - UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getSeqNo(), response.getPrimaryTerm(), response.getVersion(), response.getResult()); - update.setGetResult(UpdateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), indexSourceBytes)); + UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), + response.getType(), response.getId(), response.getSeqNo(), response.getPrimaryTerm(), + response.getVersion(), response.getResult()); + update.setGetResult(UpdateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), + result.updatedSourceAsMap(), result.updateSourceContentType(), indexSourceBytes)); update.setForcedRefresh(response.forcedRefresh()); listener.onResponse(update); }, exception ->
handleUpdateFailureWithRetry(listener, request, exception, retryCount))) @@ -208,8 +216,11 @@ protected void shardOperation(final UpdateRequest request, final ActionListener< DeleteRequest deleteRequest = result.action(); client.bulk(toSingleItemBulkRequest(deleteRequest), wrapBulkResponse( ActionListener.wrap(response -> { - UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getSeqNo(), response.getPrimaryTerm(), response.getVersion(), response.getResult()); - update.setGetResult(UpdateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), null)); + UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), + response.getType(), response.getId(), response.getSeqNo(), response.getPrimaryTerm(), + response.getVersion(), response.getResult()); + update.setGetResult(UpdateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), + result.updatedSourceAsMap(), result.updateSourceContentType(), null)); update.setForcedRefresh(response.forcedRefresh()); listener.onResponse(update); }, exception -> handleUpdateFailureWithRetry(listener, request, exception, retryCount))) diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java b/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java index 6538c0b4b5e59..3ef89b997a149 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.update; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.DocWriteResponse; @@ -28,7 +29,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -55,7 +55,9 @@ /** * Helper for translating an update request to an index, delete request or update response. 
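UpdateHelper's javadoc above describes translating an update request into an index request, a delete request, or a plain response. A simplified, self-contained model of that dispatch (hypothetical names; the real prepare(...) logic also covers scripted upserts and the finer points of noop detection):

    // Toy model of the update translation: the outcome depends on whether the
    // document exists, what a script requested, and whether the merge changed anything.
    enum UpdateOperation { INDEX, DELETE, NOOP }

    final class UpdateTranslationSketch {
        static UpdateOperation translate(boolean docExists, boolean scriptRequestedDelete, boolean sourceChanged) {
            if (!docExists) {
                return UpdateOperation.INDEX; // upsert path: create the document
            }
            if (scriptRequestedDelete) {
                return UpdateOperation.DELETE; // script set ctx.op to "delete"
            }
            return sourceChanged ? UpdateOperation.INDEX : UpdateOperation.NOOP;
        }

        public static void main(String[] args) {
            System.out.println(translate(false, false, false)); // INDEX (upsert)
            System.out.println(translate(true, true, false));   // DELETE
            System.out.println(translate(true, false, false));  // NOOP (detect_noop)
        }
    }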
*/ -public class UpdateHelper extends AbstractComponent { +public class UpdateHelper { + + private static final Logger logger = LogManager.getLogger(UpdateHelper.class); private final ScriptService scriptService; diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java index 6fb0580bfe3fe..a4fdce17d09a1 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java @@ -149,11 +149,13 @@ public ActionRequestValidationException validate() { } else { if (version != Versions.MATCH_ANY && retryOnConflict > 0) { - validationException = addValidationError("can't provide both retry_on_conflict and a specific version", validationException); + validationException = addValidationError("can't provide both retry_on_conflict and a specific version", + validationException); } if (!versionType.validateVersionForWrites(version)) { - validationException = addValidationError("illegal version value [" + version + "] for version type [" + versionType.name() + "]", validationException); + validationException = addValidationError("illegal version value [" + version + "] for version type [" + + versionType.name() + "]", validationException); } } @@ -618,8 +620,8 @@ private IndexRequest safeDoc() { } /** - * Sets the index request to be used if the document does not exists. Otherwise, a {@link org.elasticsearch.index.engine.DocumentMissingException} - * is thrown. + * Sets the index request to be used if the document does not exist. Otherwise, a + * {@link org.elasticsearch.index.engine.DocumentMissingException} is thrown. */ public UpdateRequest upsert(IndexRequest upsertRequest) { this.upsertRequest = upsertRequest; diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java index 9d1fd4a677f05..181dba6a10734 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java @@ -243,8 +243,8 @@ public UpdateRequestBuilder setDoc(XContentType xContentType, Object... source) } /** - * Sets the index request to be used if the document does not exists. Otherwise, a {@link org.elasticsearch.index.engine.DocumentMissingException} - * is thrown. + * Sets the index request to be used if the document does not exist. Otherwise, a + * {@link org.elasticsearch.index.engine.DocumentMissingException} is thrown.
*/ public UpdateRequestBuilder setUpsert(IndexRequest indexRequest) { request.upsert(indexRequest); diff --git a/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java index 66ddbdcd7978d..d6ce608901714 100644 --- a/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java @@ -19,6 +19,8 @@ package org.elasticsearch.client.support; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionListener; @@ -333,7 +335,6 @@ import org.elasticsearch.client.IndicesAdminClient; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentType; @@ -342,7 +343,9 @@ import java.util.Map; -public abstract class AbstractClient extends AbstractComponent implements Client { +public abstract class AbstractClient implements Client { + + protected final Logger logger; protected final Settings settings; private final ThreadPool threadPool; @@ -353,6 +356,7 @@ public AbstractClient(Settings settings, ThreadPool threadPool) { this.settings = settings; this.threadPool = threadPool; this.admin = new Admin(this); + this.logger = LogManager.getLogger(this.getClass()); this.threadedWrapper = new ThreadedActionListener.Wrapper(logger, settings, threadPool); } diff --git a/server/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java b/server/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java index b36fe45599590..7387b03ee822d 100644 --- a/server/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java +++ b/server/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java @@ -20,6 +20,8 @@ package org.elasticsearch.client.transport; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.core.internal.io.IOUtils; @@ -35,7 +37,6 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Randomness; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; @@ -70,7 +71,9 @@ import java.util.concurrent.ScheduledFuture; import java.util.concurrent.atomic.AtomicInteger; -final class TransportClientNodesService extends AbstractComponent implements Closeable { +final class TransportClientNodesService implements Closeable { + + private static final Logger logger = LogManager.getLogger(TransportClientNodesService.class); private final TimeValue nodesSamplerInterval; diff --git a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java index 184cbcdf859d5..559b554b007d1 100644
--- a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java +++ b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; @@ -35,7 +36,6 @@ import org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -62,8 +62,9 @@ * Every time the timer runs, gathers information about the disk usage and * shard sizes across the cluster. */ -public class InternalClusterInfoService extends AbstractComponent - implements ClusterInfoService, LocalNodeMasterListener, ClusterStateListener { +public class InternalClusterInfoService implements ClusterInfoService, LocalNodeMasterListener, ClusterStateListener { + + private static final Logger logger = LogManager.getLogger(InternalClusterInfoService.class); public static final Setting INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING = Setting.timeSetting("cluster.info.update.interval", TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(10), diff --git a/server/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java b/server/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java index 6dd40fba5cb64..5b3650ec43f37 100644 --- a/server/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java @@ -19,12 +19,13 @@ package org.elasticsearch.cluster.action.index; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaDataMappingService; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -39,7 +40,9 @@ import java.io.IOException; -public class NodeMappingRefreshAction extends AbstractComponent { +public class NodeMappingRefreshAction { + + private static final Logger logger = LogManager.getLogger(NodeMappingRefreshAction.class); public static final String ACTION_NAME = "internal:cluster/node/mapping/refresh"; diff --git a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index 3780a8bb9f48e..58d056f812719 100644 --- a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.action.shard; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import 
org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; @@ -42,7 +43,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -74,7 +74,9 @@ import java.util.concurrent.ConcurrentMap; import java.util.function.Predicate; -public class ShardStateAction extends AbstractComponent { +public class ShardStateAction { + + private static final Logger logger = LogManager.getLogger(ShardStateAction.class); public static final String SHARD_STARTED_ACTION_NAME = "internal:cluster/shard/started"; public static final String SHARD_FAILED_ACTION_NAME = "internal:cluster/shard/failure"; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 9f497edf4e59a..3bb6c38dd1d9e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -51,7 +51,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.ValidationException; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.logging.DeprecationLogger; @@ -100,7 +99,7 @@ /** * Service responsible for submitting create index requests */ -public class MetaDataCreateIndexService extends AbstractComponent { +public class MetaDataCreateIndexService { private static final Logger logger = LogManager.getLogger(MetaDataCreateIndexService.class); private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java index 39563ca7037d9..2b5bb0c94b41d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java @@ -19,6 +19,8 @@ package org.elasticsearch.cluster.metadata; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.delete.DeleteIndexClusterStateUpdateRequest; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; @@ -31,7 +33,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; @@ -47,7 +48,10 @@ /** * Deletes indices. 
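This file and the ones that follow all receive the same mechanical migration: stop extending AbstractComponent, whose main contribution here was an inherited logger, and declare a per-class static logger instead. Stripped down to the pattern itself (shown with one of the classes from these hunks):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;

    // The recurring replacement pattern in this diff: one static logger per class,
    // created via LogManager rather than inherited from AbstractComponent.
    public class MetaDataDeleteIndexService {
        private static final Logger logger = LogManager.getLogger(MetaDataDeleteIndexService.class);
    }

A static field is initialized once per class and needs no Settings instance, which is what frees these services from the base class.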
*/ -public class MetaDataDeleteIndexService extends AbstractComponent { +public class MetaDataDeleteIndexService { + + private static final Logger logger = LogManager.getLogger(MetaDataDeleteIndexService.class); + private final Settings settings; private final ClusterService clusterService; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java index 38d83b398856e..6c24d9167ddf3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java @@ -39,7 +39,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.ValidationException; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.index.Index; @@ -59,7 +58,7 @@ /** * Service responsible for submitting open/close index requests */ -public class MetaDataIndexStateService extends AbstractComponent { +public class MetaDataIndexStateService { private static final Logger logger = LogManager.getLogger(MetaDataIndexStateService.class); private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java index e397c150c1c11..5759a8aef35ff 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java @@ -19,6 +19,8 @@ package org.elasticsearch.cluster.metadata; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.alias.Alias; @@ -30,7 +32,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.ValidationException; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.IndexScopedSettings; @@ -61,7 +62,9 @@ /** * Service responsible for submitting index templates updates */ -public class MetaDataIndexTemplateService extends AbstractComponent { +public class MetaDataIndexTemplateService { + + private static final Logger logger = LogManager.getLogger(MetaDataIndexTemplateService.class); private final ClusterService clusterService; private final AliasValidator aliasValidator; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java index 84fa0626317d5..6bc9104000fed 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java @@ -18,12 +18,13 @@ */ package org.elasticsearch.cluster.metadata; +import org.apache.logging.log4j.LogManager; +import 
org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.search.similarities.Similarity; import org.elasticsearch.Version; import org.elasticsearch.common.TriFunction; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -51,7 +52,9 @@ * occurs during cluster upgrade, when dangling indices are imported into the cluster or indices * are restored from a repository. */ -public class MetaDataIndexUpgradeService extends AbstractComponent { +public class MetaDataIndexUpgradeService { + + private static final Logger logger = LogManager.getLogger(MetaDataIndexUpgradeService.class); private final Settings settings; private final NamedXContentRegistry xContentRegistry; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java index 8a65ae874f091..1832d73524161 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java @@ -19,6 +19,8 @@ package org.elasticsearch.cluster.metadata; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.action.ActionListener; @@ -32,7 +34,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.unit.TimeValue; @@ -57,7 +58,9 @@ /** * Service responsible for submitting mapping changes */ -public class MetaDataMappingService extends AbstractComponent { +public class MetaDataMappingService { + + private static final Logger logger = LogManager.getLogger(MetaDataMappingService.class); private final ClusterService clusterService; private final IndicesService indicesService; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java index 2284d507afa2c..011d1113455c8 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java @@ -37,7 +37,6 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.regex.Regex; @@ -62,7 +61,7 @@ /** * Service responsible for submitting update index settings requests */ -public class MetaDataUpdateSettingsService extends AbstractComponent { +public class MetaDataUpdateSettingsService { private static final Logger logger = LogManager.getLogger(MetaDataUpdateSettingsService.class); private static final 
DeprecationLogger deprecationLogger = new DeprecationLogger(logger); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/TemplateUpgradeService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/TemplateUpgradeService.java index 9026d26a11fd5..227dca6b739bc 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/TemplateUpgradeService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/TemplateUpgradeService.java @@ -20,6 +20,8 @@ package org.elasticsearch.cluster.metadata; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -34,7 +36,6 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.ToXContent; @@ -61,7 +62,10 @@ /** * Upgrades Templates on behalf of installed {@link Plugin}s when a node joins the cluster */ -public class TemplateUpgradeService extends AbstractComponent implements ClusterStateListener { +public class TemplateUpgradeService implements ClusterStateListener { + + private static final Logger logger = LogManager.getLogger(TemplateUpgradeService.class); + private final UnaryOperator> indexTemplateMetaDataUpgraders; public final ClusterService clusterService; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java index 81f7f68593b60..dff5771d0500a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java @@ -19,6 +19,8 @@ package org.elasticsearch.cluster.routing; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -26,7 +28,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -44,7 +45,9 @@ import java.util.Set; import java.util.stream.Collectors; -public class OperationRouting extends AbstractComponent { +public class OperationRouting { + + private static final Logger logger = LogManager.getLogger(OperationRouting.class); public static final Setting USE_ADAPTIVE_REPLICA_SELECTION_SETTING = Setting.boolSetting("cluster.routing.use_adaptive_replica_selection", true, diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index 0bc94a93cc59a..59f43a193ddc8 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ 
b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -19,6 +19,8 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.ClusterState; @@ -38,7 +40,6 @@ import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.gateway.GatewayAllocator; import java.util.ArrayList; @@ -61,7 +62,9 @@ * for shard allocation. This class also manages new nodes joining the cluster * and rerouting of shards. */ -public class AllocationService extends AbstractComponent { +public class AllocationService { + + private static final Logger logger = LogManager.getLogger(AllocationService.class); private final AllocationDeciders allocationDeciders; private GatewayAllocator gatewayAllocator; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java index f2447a9c4e51b..4badab5a0cafa 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java @@ -25,6 +25,8 @@ import com.carrotsearch.hppc.ObjectLookupContainer; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.ClusterState; @@ -35,7 +37,6 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; @@ -45,7 +46,10 @@ * reroute if it does. 
Also responsible for logging about nodes that have * passed the disk watermarks */ -public class DiskThresholdMonitor extends AbstractComponent { +public class DiskThresholdMonitor { + + private static final Logger logger = LogManager.getLogger(DiskThresholdMonitor.class); + private final DiskThresholdSettings diskThresholdSettings; private final Client client; private final Set nodeHasPassedWatermark = Sets.newConcurrentHashSet(); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 3ffe1b7756a9b..ad5db788a8046 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation.allocator; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.IntroSorter; @@ -40,7 +41,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type; import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; @@ -77,7 +77,9 @@ * These parameters are combined in a {@link WeightFunction} that allows calculation of node weights which * are used to re-balance shards based on global as well as per-index factors. */ -public class BalancedShardsAllocator extends AbstractComponent implements ShardsAllocator { +public class BalancedShardsAllocator implements ShardsAllocator { + + private static final Logger logger = LogManager.getLogger(BalancedShardsAllocator.class); public static final Setting INDEX_BALANCE_FACTOR_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.index", 0.55f, 0.0f, Property.Dynamic, Property.NodeScope); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDecider.java index 3ae86d60bd98c..bd51b7d47b335 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDecider.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.routing.allocation.decider; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; @@ -31,7 +30,7 @@ * dynamic cluster- or index-wide shard allocation decisions on a per-node * basis. */ -public abstract class AllocationDecider extends AbstractComponent { +public abstract class AllocationDecider { /** * Returns a {@link Decision} whether the given shard routing can be * re-balanced to the given allocation. 
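AllocationDecider remains the extension point for allocation rules even without the AbstractComponent base class, and AllocationDeciders (below) combines the registered deciders. A self-contained sketch of the combine-the-votes idea, loosely mirroring that chain (simplified types; the real implementation also has debug/explain paths):

    import java.util.Arrays;

    // Toy decider chain: each decider votes YES / THROTTLE / NO and the most
    // restrictive vote wins, with NO short-circuiting the chain.
    final class DeciderChainSketch {
        enum Decision { YES, THROTTLE, NO }

        interface Decider {
            Decision canAllocate(String shard, String node);
        }

        static Decision combine(Iterable<Decider> deciders, String shard, String node) {
            Decision result = Decision.YES;
            for (Decider d : deciders) {
                Decision vote = d.canAllocate(shard, node);
                if (vote == Decision.NO) {
                    return Decision.NO; // a hard veto wins immediately
                }
                if (vote == Decision.THROTTLE) {
                    result = Decision.THROTTLE; // remember the throttle, keep asking
                }
            }
            return result;
        }

        public static void main(String[] args) {
            Decider diskFull = (shard, node) -> node.equals("node-1") ? Decision.NO : Decision.YES;
            Decider recoveryBusy = (shard, node) -> Decision.THROTTLE;
            System.out.println(combine(Arrays.asList(diskFull, recoveryBusy), "shard-0", "node-2")); // THROTTLE
            System.out.println(combine(Arrays.asList(diskFull, recoveryBusy), "shard-0", "node-1")); // NO
        }
    }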
The default is diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java index 7f91be340fd79..5ab234c7e892d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java @@ -19,6 +19,8 @@ package org.elasticsearch.cluster.routing.allocation.decider; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; @@ -35,6 +37,8 @@ */ public class AllocationDeciders extends AllocationDecider { + private static final Logger logger = LogManager.getLogger(AllocationDeciders.class); + private final Collection allocations; public AllocationDeciders(Collection allocations) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java index 1ea369c75d9bc..4d309dd972818 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java @@ -21,6 +21,8 @@ import java.util.Locale; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.settings.ClusterSettings; @@ -47,6 +49,8 @@ */ public class ClusterRebalanceAllocationDecider extends AllocationDecider { + private static final Logger logger = LogManager.getLogger(ClusterRebalanceAllocationDecider.class); + public static final String NAME = "cluster_rebalance"; private static final String CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE = "cluster.routing.allocation.allow_rebalance"; public static final Setting CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING = diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java index a11b3dcf102f8..a27f46b5512f1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java @@ -19,6 +19,8 @@ package org.elasticsearch.cluster.routing.allocation.decider; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.settings.ClusterSettings; @@ -40,6 +42,8 @@ */ public class ConcurrentRebalanceAllocationDecider extends AllocationDecider { + private static final Logger logger = LogManager.getLogger(ConcurrentRebalanceAllocationDecider.class); + public static final String NAME = "concurrent_rebalance"; public static final Setting 
CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING = diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index 9676eaf4df1c3..1f048fca76c09 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -22,6 +22,8 @@ import java.util.Set; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.DiskUsage; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -68,6 +70,8 @@ */ public class DiskThresholdDecider extends AllocationDecider { + private static final Logger logger = LogManager.getLogger(DiskThresholdDecider.class); + public static final String NAME = "disk_threshold"; private final DiskThresholdSettings diskThresholdSettings; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java index 7eb1b882d1ffe..1dbaa01be0936 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java @@ -19,6 +19,8 @@ package org.elasticsearch.cluster.routing.allocation.decider; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; @@ -30,6 +32,8 @@ */ public class SnapshotInProgressAllocationDecider extends AllocationDecider { + private static final Logger logger = LogManager.getLogger(SnapshotInProgressAllocationDecider.class); + public static final String NAME = "snapshot_in_progress"; /** diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java index 0d67cd6071f08..596d3af261f17 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java @@ -19,6 +19,8 @@ package org.elasticsearch.cluster.routing.allocation.decider; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; @@ -52,6 +54,8 @@ */ public class ThrottlingAllocationDecider extends AllocationDecider { + private static final Logger logger = LogManager.getLogger(ThrottlingAllocationDecider.class); + public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES = 2; public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES = 4; public static final String NAME = "throttling"; diff --git 
a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java index cffdf0f4507c0..eea30dd4e530f 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java @@ -24,7 +24,6 @@ import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -33,7 +32,7 @@ import java.nio.file.Files; import java.nio.file.Path; -public class FsBlobStore extends AbstractComponent implements BlobStore { +public class FsBlobStore implements BlobStore { private final Path path; diff --git a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java index 9667341cd6ef3..b7a8f9c989596 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java @@ -19,12 +19,13 @@ package org.elasticsearch.common.settings; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.search.spell.LevenshteinDistance; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.regex.Regex; import java.util.ArrayList; @@ -47,12 +48,15 @@ * A basic setting service that can be used for per-index and per-cluster settings. * This service offers transactional application of updates settings. */ -public abstract class AbstractScopedSettings extends AbstractComponent { +public abstract class AbstractScopedSettings { + public static final String ARCHIVED_SETTINGS_PREFIX = "archived."; private static final Pattern KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])*[-\\w]+$"); private static final Pattern GROUP_KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])+$"); private static final Pattern AFFIX_KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])+[*](?:[.][-\\w]+)+$"); + protected final Logger logger = LogManager.getLogger(this.getClass()); + private final Settings settings; private final List> settingUpdaters = new CopyOnWriteArrayList<>(); private final Map> complexMatchers; diff --git a/server/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java b/server/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java index 6f2e28e97cbae..66bfdbdc194f5 100644 --- a/server/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java +++ b/server/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.util; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.recycler.AbstractRecyclerC; @@ -39,7 +38,7 @@ import static org.elasticsearch.common.recycler.Recyclers.none; /** A recycler of fixed-size pages. 
*/ -public class PageCacheRecycler extends AbstractComponent implements Releasable { +public class PageCacheRecycler implements Releasable { public static final Setting TYPE_SETTING = new Setting<>("cache.recycler.page.type", Type.CONCURRENT.name(), Type::parse, Property.NodeScope); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/PutRollupJobResponseTests.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/RunOnce.java similarity index 51% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/PutRollupJobResponseTests.java rename to server/src/main/java/org/elasticsearch/common/util/concurrent/RunOnce.java index ab8ef93e0c7dd..300413ac44c4a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/PutRollupJobResponseTests.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/RunOnce.java @@ -16,35 +16,35 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.client.rollup; +package org.elasticsearch.common.util.concurrent; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.test.AbstractXContentTestCase; -import org.junit.Before; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicBoolean; -import java.io.IOException; - -public class PutRollupJobResponseTests extends AbstractXContentTestCase { - - private boolean acknowledged; +/** + * Runnable that can only be run one time. + */ +public class RunOnce implements Runnable { - @Before - public void setupJobID() { - acknowledged = randomBoolean(); - } + private final Runnable delegate; + private final AtomicBoolean hasRun; - @Override - protected PutRollupJobResponse createTestInstance() { - return new PutRollupJobResponse(acknowledged); + public RunOnce(final Runnable delegate) { + this.delegate = Objects.requireNonNull(delegate); + this.hasRun = new AtomicBoolean(false); } @Override - protected PutRollupJobResponse doParseInstance(XContentParser parser) throws IOException { - return PutRollupJobResponse.fromXContent(parser); + public void run() { + if (hasRun.compareAndSet(false, true)) { + delegate.run(); + } } - @Override - protected boolean supportsUnknownFields() { - return false; + /** + * {@code true} if the {@link RunOnce} has been executed once. 
+ */ + public boolean hasRun() { + return hasRun.get(); } } diff --git a/server/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java b/server/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java index ebc64fa3af1d1..8cb2f6cf672f4 100644 --- a/server/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java +++ b/server/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java @@ -21,7 +21,6 @@ import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -34,7 +33,7 @@ /** * Exposes common discovery settings that may be supported by all the different discovery implementations */ -public class DiscoverySettings extends AbstractComponent { +public class DiscoverySettings { public static final int NO_MASTER_BLOCK_ID = 2; public static final ClusterBlock NO_MASTER_BLOCK_ALL = new ClusterBlock(NO_MASTER_BLOCK_ID, "no master", true, true, false, diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java b/server/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java index 8f2853904fa0f..e9eab8fa2550d 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java @@ -20,10 +20,11 @@ package org.elasticsearch.discovery.zen; import com.carrotsearch.hppc.ObjectContainer; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -37,7 +38,9 @@ import java.util.Objects; import java.util.stream.Collectors; -public class ElectMasterService extends AbstractComponent { +public class ElectMasterService { + + private static final Logger logger = LogManager.getLogger(ElectMasterService.class); public static final Setting DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING = Setting.intSetting("discovery.zen.minimum_master_nodes", -1, Property.Dynamic, Property.NodeScope); diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/FaultDetection.java b/server/src/main/java/org/elasticsearch/discovery/zen/FaultDetection.java index 3d389fc814188..5c731621179ac 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/FaultDetection.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/FaultDetection.java @@ -21,9 +21,10 @@ import java.io.Closeable; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -39,7 +40,9 @@ * A base class for {@link MasterFaultDetection} & {@link NodesFaultDetection}, * making sure both use the same setting. 
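The RunOnce class moved into server above is worth a quick usage sketch; the counter and class name here are illustrative, while the constructor, run(), and hasRun() are exactly the API shown in the diff:

    import java.util.concurrent.atomic.AtomicInteger;
    import org.elasticsearch.common.util.concurrent.RunOnce;

    public class RunOnceExample {
        public static void main(String[] args) {
            AtomicInteger calls = new AtomicInteger();
            RunOnce once = new RunOnce(calls::incrementAndGet);
            once.run();
            once.run(); // no-op: the compareAndSet on hasRun already succeeded
            System.out.println(calls.get() + " " + once.hasRun()); // prints "1 true"
        }
    }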
*/ -public abstract class FaultDetection extends AbstractComponent implements Closeable { +public abstract class FaultDetection implements Closeable { + + private static final Logger logger = LogManager.getLogger(FaultDetection.class); public static final Setting CONNECT_ON_NETWORK_DISCONNECT_SETTING = Setting.boolSetting("discovery.zen.fd.connect_on_network_disconnect", false, Property.NodeScope); diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProvider.java b/server/src/main/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProvider.java index c80fceea756e4..c5030489fea64 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProvider.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProvider.java @@ -19,8 +19,9 @@ package org.elasticsearch.discovery.zen; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.transport.TransportAddress; import java.io.IOException; @@ -43,7 +44,9 @@ * 67.81.244.11:9305 * 67.81.244.15:9400 */ -public class FileBasedUnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider { +public class FileBasedUnicastHostsProvider implements UnicastHostsProvider { + + private static final Logger logger = LogManager.getLogger(FileBasedUnicastHostsProvider.class); public static final String UNICAST_HOSTS_FILE = "unicast_hosts.txt"; diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/MasterFaultDetection.java b/server/src/main/java/org/elasticsearch/discovery/zen/MasterFaultDetection.java index b48ea77e64c75..c950312d78bc8 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/MasterFaultDetection.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/MasterFaultDetection.java @@ -19,6 +19,8 @@ package org.elasticsearch.discovery.zen; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.ClusterName; @@ -55,6 +57,8 @@ */ public class MasterFaultDetection extends FaultDetection { + private static final Logger logger = LogManager.getLogger(MasterFaultDetection.class); + public static final String MASTER_PING_ACTION_NAME = "internal:discovery/zen/fd/master_ping"; public interface Listener { diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java b/server/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java index f699e547bf40e..097185c12567e 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java @@ -19,13 +19,14 @@ package org.elasticsearch.discovery.zen; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.StreamInput; import 
org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; @@ -44,7 +45,9 @@ import java.util.function.BiConsumer; import java.util.function.Supplier; -public class MembershipAction extends AbstractComponent { +public class MembershipAction { + + private static final Logger logger = LogManager.getLogger(MembershipAction.class); public static final String DISCOVERY_JOIN_ACTION_NAME = "internal:discovery/zen/join"; public static final String DISCOVERY_JOIN_VALIDATE_ACTION_NAME = "internal:discovery/zen/join/validate"; diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java b/server/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java index ecf52a6975369..2e2f72ca0fd94 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.discovery.zen; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; @@ -34,7 +35,6 @@ import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.MasterService; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.DiscoverySettings; @@ -54,7 +54,9 @@ * This class processes incoming join requests (passed via {@link ZenDiscovery}). Incoming nodes * are directly added to the cluster state or are accumulated during master election.
*/ -public class NodeJoinController extends AbstractComponent { +public class NodeJoinController { + + private static final Logger logger = LogManager.getLogger(NodeJoinController.class); private final MasterService masterService; private final JoinTaskExecutor joinTaskExecutor; diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java b/server/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java index 40bde9ee81d15..92ac9f22ef30b 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java @@ -19,6 +19,8 @@ package org.elasticsearch.discovery.zen; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -54,6 +56,8 @@ */ public class NodesFaultDetection extends FaultDetection { + private static final Logger logger = LogManager.getLogger(NodesFaultDetection.class); + public static final String PING_ACTION_NAME = "internal:discovery/zen/fd/ping"; public abstract static class Listener { diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java b/server/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java index ca014af53d8c9..8288f865440dd 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java @@ -19,6 +19,8 @@ package org.elasticsearch.discovery.zen; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; @@ -30,7 +32,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.compress.Compressor; import org.elasticsearch.common.compress.CompressorFactory; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -67,7 +68,9 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; -public class PublishClusterStateAction extends AbstractComponent { +public class PublishClusterStateAction { + + private static final Logger logger = LogManager.getLogger(PublishClusterStateAction.class); public static final String SEND_ACTION_NAME = "internal:discovery/zen/publish/send"; public static final String COMMIT_ACTION_NAME = "internal:discovery/zen/publish/commit"; diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/SettingsBasedHostsProvider.java b/server/src/main/java/org/elasticsearch/discovery/zen/SettingsBasedHostsProvider.java index a11e255f88878..3b16c3734156f 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/SettingsBasedHostsProvider.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/SettingsBasedHostsProvider.java @@ -19,7 +19,8 @@ package org.elasticsearch.discovery.zen; -import org.elasticsearch.common.component.AbstractComponent; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.settings.Setting; import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; @@ -38,7 +39,9 @@ * An example unicast hosts setting might look as follows: * [67.81.244.10, 67.81.244.11:9305, 67.81.244.15:9400] */ -public class SettingsBasedHostsProvider extends AbstractComponent implements UnicastHostsProvider { +public class SettingsBasedHostsProvider implements UnicastHostsProvider { + + private static final Logger logger = LogManager.getLogger(SettingsBasedHostsProvider.class); public static final Setting> DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING = Setting.listSetting("discovery.zen.ping.unicast.hosts", emptyList(), Function.identity(), Setting.Property.NodeScope); diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java b/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java index 8fb9cfce0bf7a..80cf4b5f1ecb8 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java @@ -20,6 +20,7 @@ package org.elasticsearch.discovery.zen; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; @@ -29,7 +30,6 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lease.Releasable; @@ -91,7 +91,9 @@ import static java.util.Collections.emptySet; import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; -public class UnicastZenPing extends AbstractComponent implements ZenPing { +public class UnicastZenPing implements ZenPing { + + private static final Logger logger = LogManager.getLogger(UnicastZenPing.class); public static final String ACTION_NAME = "internal:discovery/zen/unicast"; public static final Setting DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING = diff --git a/server/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java b/server/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java index ba29a08987d3c..7e4172961ea1e 100644 --- a/server/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java +++ b/server/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java @@ -19,6 +19,7 @@ package org.elasticsearch.gateway; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingNodes; @@ -28,7 +29,6 @@ import org.elasticsearch.cluster.routing.allocation.NodeAllocationResult; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.Decision; -import org.elasticsearch.common.component.AbstractComponent; import java.util.ArrayList; import java.util.List; @@ -40,7 +40,10 @@ * Individual implementations of this class are responsible for providing * the logic to determine to which nodes (if any) those shards are allocated. 
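For the SettingsBasedHostsProvider above, the unicast hosts example from its javadoc corresponds to a list-valued setting. A sketch of building that value programmatically, assuming the standard Settings builder API of this codebase (the addresses are the javadoc's own):

    import org.elasticsearch.common.settings.Settings;

    public class UnicastHostsExample {
        public static void main(String[] args) {
            Settings settings = Settings.builder()
                .putList("discovery.zen.ping.unicast.hosts",
                         "67.81.244.10", "67.81.244.11:9305", "67.81.244.15:9400")
                .build();
            System.out.println(settings.getAsList("discovery.zen.ping.unicast.hosts"));
        }
    }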
*/ -public abstract class BaseGatewayShardAllocator extends AbstractComponent { +public abstract class BaseGatewayShardAllocator { + + protected final Logger logger = LogManager.getLogger(this.getClass()); + /** * Allocate unassigned shards to nodes (if any) where valid copies of the shard already exist. * It is up to the individual implementations of {@link #makeAllocationDecision(ShardRouting, RoutingAllocation, Logger)} diff --git a/server/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java b/server/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java index d9eb5013e9c6c..4d7949cdf4de8 100644 --- a/server/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java +++ b/server/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java @@ -20,13 +20,14 @@ package org.elasticsearch.gateway; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.metadata.IndexGraveyard; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.env.NodeEnvironment; @@ -50,7 +51,9 @@ * their state written on disk, but don't exist in the metadata of the cluster), and importing * them into the cluster. */ -public class DanglingIndicesState extends AbstractComponent implements ClusterStateListener { +public class DanglingIndicesState implements ClusterStateListener { + + private static final Logger logger = LogManager.getLogger(DanglingIndicesState.class); private final NodeEnvironment nodeEnv; private final MetaStateService metaStateService; diff --git a/server/src/main/java/org/elasticsearch/gateway/Gateway.java b/server/src/main/java/org/elasticsearch/gateway/Gateway.java index 7a31a089903fb..fb6807323eda9 100644 --- a/server/src/main/java/org/elasticsearch/gateway/Gateway.java +++ b/server/src/main/java/org/elasticsearch/gateway/Gateway.java @@ -21,13 +21,14 @@ import com.carrotsearch.hppc.ObjectFloatHashMap; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.zen.ElectMasterService; @@ -37,7 +38,9 @@ import java.util.Arrays; import java.util.Map; -public class Gateway extends AbstractComponent { +public class Gateway { + + private static final Logger logger = LogManager.getLogger(Gateway.class); private final ClusterService clusterService; diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java b/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java index dce92b1dd5083..67d9ab9a5bf88 100644
--- a/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java @@ -19,6 +19,7 @@ package org.elasticsearch.gateway; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.support.nodes.BaseNodeResponse; import org.elasticsearch.action.support.nodes.BaseNodesResponse; @@ -30,7 +31,6 @@ import org.elasticsearch.cluster.routing.allocation.FailedShard; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; @@ -40,7 +40,9 @@ import java.util.List; import java.util.concurrent.ConcurrentMap; -public class GatewayAllocator extends AbstractComponent { +public class GatewayAllocator { + + private static final Logger logger = LogManager.getLogger(GatewayAllocator.class); private final RoutingService routingService; diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java index 9bbb5af5bf028..7220d8f873334 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java @@ -20,6 +20,8 @@ package org.elasticsearch.gateway; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; @@ -32,7 +34,6 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.env.NodeEnvironment; @@ -57,7 +58,9 @@ import static java.util.Collections.emptySet; import static java.util.Collections.unmodifiableSet; -public class GatewayMetaState extends AbstractComponent implements ClusterStateApplier { +public class GatewayMetaState implements ClusterStateApplier { + + private static final Logger logger = LogManager.getLogger(GatewayMetaState.class); private final NodeEnvironment nodeEnv; private final MetaStateService metaStateService; diff --git a/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java b/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java index efdf29e2eb622..a5f4f77da438b 100644 --- a/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java +++ b/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java @@ -19,6 +19,8 @@ package org.elasticsearch.gateway; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; @@ -31,7 +33,6 @@ import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; -import 
org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -50,7 +51,9 @@ import java.util.Arrays; import java.util.Collection; -public class LocalAllocateDangledIndices extends AbstractComponent { +public class LocalAllocateDangledIndices { + + private static final Logger logger = LogManager.getLogger(LocalAllocateDangledIndices.class); public static final String ACTION_NAME = "internal:gateway/local/allocate_dangled"; diff --git a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java index 24f5fd63662d9..66e0c6e2f06ee 100644 --- a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java +++ b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java @@ -19,11 +19,12 @@ package org.elasticsearch.gateway; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; @@ -36,7 +37,9 @@ /** * Handles writing and loading both {@link MetaData} and {@link IndexMetaData} */ -public class MetaStateService extends AbstractComponent { +public class MetaStateService { + + private static final Logger logger = LogManager.getLogger(MetaStateService.class); private final NodeEnvironment nodeEnv; private final NamedXContentRegistry namedXContentRegistry; diff --git a/server/src/main/java/org/elasticsearch/index/IndexWarmer.java b/server/src/main/java/org/elasticsearch/index/IndexWarmer.java index 277cdcaba26c8..d26693ec5d0d2 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexWarmer.java +++ b/server/src/main/java/org/elasticsearch/index/IndexWarmer.java @@ -19,9 +19,10 @@ package org.elasticsearch.index; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.DirectoryReader; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.fielddata.IndexFieldData; @@ -41,7 +42,9 @@ import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; -public final class IndexWarmer extends AbstractComponent { +public final class IndexWarmer { + + private static final Logger logger = LogManager.getLogger(IndexWarmer.class); private final List listeners; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java index f7c294111dd7f..df6a291372f64 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java @@ -514,11 +514,11 @@ private void parse(ParseContext parseContext, Token token, XContentParser parser, Map inputMap) throws IOException { String currentFieldName = null; if (token == Token.VALUE_STRING) { - 
inputMap.put(parser.text(), new CompletionInputMetaData(parser.text(), Collections.emptyMap(), 1)); + inputMap.put(parser.text(), new CompletionInputMetaData(parser.text(), Collections.>emptyMap(), 1)); } else if (token == Token.START_OBJECT) { Set inputs = new HashSet<>(); int weight = 1; - Map> contextsMap = new HashMap<>(); + Map> contextsMap = new HashMap<>(); while ((token = parser.nextToken()) != Token.END_OBJECT) { if (token == Token.FIELD_NAME) { currentFieldName = parser.currentName(); @@ -603,10 +603,10 @@ private void parse(ParseContext parseContext, Token token, static class CompletionInputMetaData { public final String input; - public final Map> contexts; + public final Map> contexts; public final int weight; - CompletionInputMetaData(String input, Map> contexts, int weight) { + CompletionInputMetaData(String input, Map> contexts, int weight) { this.input = input; this.contexts = contexts; this.weight = weight; diff --git a/server/src/main/java/org/elasticsearch/index/reindex/WorkerBulkByScrollTaskState.java b/server/src/main/java/org/elasticsearch/index/reindex/WorkerBulkByScrollTaskState.java index 797d622756176..17bf59a104a80 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/WorkerBulkByScrollTaskState.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/WorkerBulkByScrollTaskState.java @@ -23,12 +23,13 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.util.concurrent.FutureUtils; +import org.elasticsearch.common.util.concurrent.RunOnce; import org.elasticsearch.threadpool.ThreadPool; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; @@ -188,8 +189,12 @@ public void delayPrepareBulkRequest(ThreadPool threadPool, TimeValue lastBatchSt synchronized (delayedPrepareBulkRequestReference) { TimeValue delay = throttleWaitTime(lastBatchStartTime, timeValueNanos(System.nanoTime()), lastBatchSize); logger.debug("[{}]: preparing bulk request for [{}]", task.getId(), delay); - delayedPrepareBulkRequestReference.set(new DelayedPrepareBulkRequest(threadPool, getRequestsPerSecond(), - delay, new RunOnce(prepareBulkRequestRunnable))); + try { + delayedPrepareBulkRequestReference.set(new DelayedPrepareBulkRequest(threadPool, getRequestsPerSecond(), + delay, new RunOnce(prepareBulkRequestRunnable))); + } catch (EsRejectedExecutionException e) { + prepareBulkRequestRunnable.onRejection(e); + } } } @@ -242,25 +247,17 @@ public void rethrottle(float newRequestsPerSecond) { class DelayedPrepareBulkRequest { private final ThreadPool threadPool; - private final AbstractRunnable command; + private final Runnable command; private final float requestsPerSecond; private final ScheduledFuture future; - DelayedPrepareBulkRequest(ThreadPool threadPool, float requestsPerSecond, TimeValue delay, AbstractRunnable command) { + DelayedPrepareBulkRequest(ThreadPool threadPool, float requestsPerSecond, TimeValue delay, Runnable command) { this.threadPool = threadPool; this.requestsPerSecond = requestsPerSecond; this.command = command; - this.future = threadPool.schedule(delay, ThreadPool.Names.GENERIC, new AbstractRunnable() { - @Override - 
protected void doRun() throws Exception { - throttledNanos.addAndGet(delay.nanos()); - command.run(); - } - - @Override - public void onFailure(Exception e) { - command.onFailure(e); - } + this.future = threadPool.schedule(delay, ThreadPool.Names.GENERIC, () -> { + throttledNanos.addAndGet(delay.nanos()); + command.run(); }); } @@ -302,29 +299,4 @@ TimeValue newDelay(long remainingDelay, float newRequestsPerSecond) { return timeValueNanos(round(remainingDelay * requestsPerSecond / newRequestsPerSecond)); } } - - /** - * Runnable that can only be run one time. This is paranoia to prevent furiously rethrottling from running the command multiple times. - * Without it the command would be run multiple times. - */ - private static class RunOnce extends AbstractRunnable { - private final AtomicBoolean hasRun = new AtomicBoolean(false); - private final AbstractRunnable delegate; - - RunOnce(AbstractRunnable delegate) { - this.delegate = delegate; - } - - @Override - protected void doRun() throws Exception { - if (hasRun.compareAndSet(false, true)) { - delegate.run(); - } - } - - @Override - public void onFailure(Exception e) { - delegate.onFailure(e); - } - } } diff --git a/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java b/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java index fa2fd033bee0d..bcf1e92431587 100644 --- a/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java +++ b/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java @@ -160,6 +160,12 @@ public void setDefaultOperator(Operator op) { queryBuilder.setOccur(op == Operator.AND ? BooleanClause.Occur.MUST : BooleanClause.Occur.SHOULD); } + @Override + public void setPhraseSlop(int phraseSlop) { + super.setPhraseSlop(phraseSlop); + queryBuilder.setPhraseSlop(phraseSlop); + } + /** * @param type Sets how multiple fields should be combined to build textual part queries. 
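The WorkerBulkByScrollTaskState change above replaces the hand-rolled run-once inner class with the shared RunOnce; as the removed comment notes, rethrottling can reschedule the same command, and the run-once guard is what keeps the prepare-bulk runnable from executing twice. A schematic sketch of that guarantee, with a plain ScheduledExecutorService standing in for the Elasticsearch ThreadPool:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;
    import org.elasticsearch.common.util.concurrent.RunOnce;

    public class RethrottleSketch {
        public static void main(String[] args) throws InterruptedException {
            ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
            RunOnce prepare = new RunOnce(() -> System.out.println("prepare bulk request"));
            pool.schedule(prepare, 500, TimeUnit.MILLISECONDS); // original delay
            pool.schedule(prepare, 100, TimeUnit.MILLISECONDS); // rescheduled after a rethrottle
            Thread.sleep(1000); // the message prints exactly once
            pool.shutdown();
        }
    }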
*/ diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShardOperationPermits.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShardOperationPermits.java index 67c48c38791f0..d5d0d7f3e9753 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShardOperationPermits.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShardOperationPermits.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.RunOnce; import org.elasticsearch.common.util.concurrent.ThreadContext.StoredContext; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.threadpool.ThreadPool; @@ -124,12 +125,12 @@ public void asyncBlockOperations(final ActionListener onAcquired, fi delayOperations(); threadPool.executor(ThreadPool.Names.GENERIC).execute(new AbstractRunnable() { - final AtomicBoolean released = new AtomicBoolean(false); + final RunOnce released = new RunOnce(() -> releaseDelayedOperations()); @Override public void onFailure(final Exception e) { try { - releaseDelayedOperationsIfNeeded(); // resume delayed operations as soon as possible + released.run(); // resume delayed operations as soon as possible } finally { onAcquired.onFailure(e); } @@ -142,16 +143,10 @@ protected void doRun() throws Exception { try { releasable.close(); } finally { - releaseDelayedOperationsIfNeeded(); + released.run(); } }); } - - private void releaseDelayedOperationsIfNeeded() { - if (released.compareAndSet(false, true)) { - releaseDelayedOperations(); - } - } }); } @@ -173,13 +168,11 @@ private Releasable acquireAll(final long timeout, final TimeUnit timeUnit) throw } } if (semaphore.tryAcquire(TOTAL_PERMITS, timeout, timeUnit)) { - final AtomicBoolean closed = new AtomicBoolean(); - return () -> { - if (closed.compareAndSet(false, true)) { - assert semaphore.availablePermits() == 0; - semaphore.release(TOTAL_PERMITS); - } - }; + final RunOnce release = new RunOnce(() -> { + assert semaphore.availablePermits() == 0; + semaphore.release(TOTAL_PERMITS); + }); + return release::run; } else { throw new TimeoutException("timeout while blocking operations"); } diff --git a/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java b/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java index 683a5a79c36af..f3e631f8bf6e0 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java +++ b/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.index.shard; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; @@ -26,7 +27,6 @@ import org.elasticsearch.action.resync.ResyncReplicationResponse; import org.elasticsearch.action.resync.TransportResyncReplicationAction; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -52,7 +52,9 @@ import static java.util.Objects.requireNonNull; -public class PrimaryReplicaSyncer extends AbstractComponent { +public class PrimaryReplicaSyncer { + + private static final Logger logger = 
LogManager.getLogger(PrimaryReplicaSyncer.class); private final TaskManager taskManager; private final SyncAction syncAction; diff --git a/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java b/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java index bf3046c954829..db8d1ee29be84 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java +++ b/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java @@ -19,9 +19,10 @@ package org.elasticsearch.indices; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -48,7 +49,9 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantLock; -public class IndexingMemoryController extends AbstractComponent implements IndexingOperationListener, Closeable { +public class IndexingMemoryController implements IndexingOperationListener, Closeable { + + private static final Logger logger = LogManager.getLogger(IndexingMemoryController.class); /** How much heap (% or bytes) we will share across all actively indexing shards on this node (default: 10%). */ public static final Setting INDEX_BUFFER_SIZE_SETTING = diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java b/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java index 129b839bac75a..1dfbc3af42b49 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java @@ -19,6 +19,8 @@ package org.elasticsearch.indices; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.Term; import org.apache.lucene.search.BulkScorer; @@ -30,7 +32,6 @@ import org.apache.lucene.search.Scorer; import org.apache.lucene.search.ScorerSupplier; import org.apache.lucene.search.Weight; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.lucene.ShardCoreKeyMap; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -48,7 +49,9 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.function.Predicate; -public class IndicesQueryCache extends AbstractComponent implements QueryCache, Closeable { +public class IndicesQueryCache implements QueryCache, Closeable { + + private static final Logger logger = LogManager.getLogger(IndicesQueryCache.class); public static final Setting INDICES_CACHE_QUERY_SIZE_SETTING = Setting.memorySizeSetting("indices.queries.cache.size", "10%", Property.NodeScope); diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java b/server/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java index 7c227b3366fee..49c2d070c0310 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java @@ -34,7 +34,6 @@ import org.elasticsearch.common.cache.CacheLoader; import org.elasticsearch.common.cache.RemovalListener; import 
org.elasticsearch.common.cache.RemovalNotification; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -65,8 +64,9 @@ * There are still several TODOs left in this class, some easily addressable, some more complex, but the support * is functional. */ -public final class IndicesRequestCache extends AbstractComponent implements RemovalListener, Closeable { +public final class IndicesRequestCache implements RemovalListener, Closeable { + + private static final Logger logger = LogManager.getLogger(IndicesRequestCache.class); /** * A setting to enable or disable request caching on an index level. It's dynamic by default @@ -79,8 +79,6 @@ public final class IndicesRequestCache extends AbstractComponent implements Remo public static final Setting INDICES_CACHE_QUERY_EXPIRE = Setting.positiveTimeSetting("indices.requests.cache.expire", new TimeValue(0), Property.NodeScope); - private static final Logger LOGGER = LogManager.getLogger(IndicesRequestCache.class); - private final ConcurrentMap registeredClosedListeners = ConcurrentCollections.newConcurrentMap(); private final Set keysToClean = ConcurrentCollections.newConcurrentSet(); private final ByteSizeValue size; @@ -310,7 +308,7 @@ synchronized void cleanCache() { CleanupKey cleanupKey = iterator.next(); iterator.remove(); if (cleanupKey.readerCacheKey == null || cleanupKey.entity.isOpen() == false) { - // -1 indicates full cleanup, as does a closed shard + // null indicates full cleanup, as does a closed shard currentFullClean.add(cleanupKey.entity.getCacheIdentity()); } else { currentKeysToClean.add(cleanupKey); diff --git a/server/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java b/server/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java index 8c2eb9b67b8df..08232a5ef62e1 100644 --- a/server/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java +++ b/server/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java @@ -18,13 +18,14 @@ */ package org.elasticsearch.indices.analysis; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.analysis.hunspell.Dictionary; import org.apache.lucene.store.Directory; import org.apache.lucene.store.SimpleFSDirectory; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -74,7 +75,9 @@ * * @see org.elasticsearch.index.analysis.HunspellTokenFilterFactory */ -public class HunspellService extends AbstractComponent { +public class HunspellService { + + private static final Logger logger = LogManager.getLogger(HunspellService.class); public static final Setting HUNSPELL_LAZY_LOAD = Setting.boolSetting("indices.analysis.hunspell.dictionary.lazy", Boolean.FALSE, Property.NodeScope); diff --git a/server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java b/server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java index a5945187de154..9d26c0fad0184 100644 ---
a/server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java +++ b/server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java @@ -20,6 +20,8 @@ package org.elasticsearch.indices.fielddata.cache; import java.util.Collections; + +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; @@ -31,7 +33,6 @@ import org.elasticsearch.common.cache.CacheBuilder; import org.elasticsearch.common.cache.RemovalListener; import org.elasticsearch.common.cache.RemovalNotification; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.settings.Setting; @@ -50,7 +51,9 @@ import java.util.List; import java.util.function.ToLongBiFunction; -public class IndicesFieldDataCache extends AbstractComponent implements RemovalListener, Releasable{ +public class IndicesFieldDataCache implements RemovalListener, Releasable{ + + private static final Logger logger = LogManager.getLogger(IndicesFieldDataCache.class); public static final Setting INDICES_FIELDDATA_CACHE_SIZE_KEY = Setting.memorySizeSetting("indices.fielddata.cache.size", new ByteSizeValue(-1), Property.NodeScope); diff --git a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java index 28b5eeeba6b1b..d23e0db84d003 100644 --- a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java +++ b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.indices.flush; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; @@ -36,7 +38,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -71,7 +72,9 @@ import java.util.Map; import java.util.concurrent.ConcurrentMap; -public class SyncedFlushService extends AbstractComponent implements IndexEventListener { +public class SyncedFlushService implements IndexEventListener { + + private static final Logger logger = LogManager.getLogger(SyncedFlushService.class); private static final String PRE_SYNCED_FLUSH_ACTION_NAME = "internal:indices/flush/synced/pre"; private static final String SYNCED_FLUSH_ACTION_NAME = "internal:indices/flush/synced/sync"; diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java index bd237ae453361..a8abce4382e1b 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java @@ -19,10 +19,11 @@ package org.elasticsearch.indices.recovery; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import 
org.elasticsearch.ExceptionsHelper; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; @@ -48,7 +49,9 @@ * The source recovery accepts recovery requests from other peer shards and starts the recovery process from this * source shard to the target shard. */ -public class PeerRecoverySourceService extends AbstractComponent implements IndexEventListener { +public class PeerRecoverySourceService implements IndexEventListener { + + private static final Logger logger = LogManager.getLogger(PeerRecoverySourceService.class); public static class Actions { public static final String START_RECOVERY = "internal:index/shard/recovery/start_recovery"; diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index e461628ac4902..cdb4082b82e70 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -19,6 +19,7 @@ package org.elasticsearch.indices.recovery; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.DirectoryReader; @@ -34,7 +35,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; @@ -79,7 +79,9 @@ * Note, it can be safely assumed that there will only be a single recovery per shard (index+id) and * not several of them (since we don't allocate several shard replicas to the same node).
*/ -public class PeerRecoveryTargetService extends AbstractComponent implements IndexEventListener { +public class PeerRecoveryTargetService implements IndexEventListener { + + private static final Logger logger = LogManager.getLogger(PeerRecoveryTargetService.class); public static class Actions { public static final String FILES_INFO = "internal:index/shard/recovery/filesInfo"; diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java index b90bed90d052b..e9799b28ac901 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java @@ -19,9 +19,10 @@ package org.elasticsearch.indices.recovery; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.store.RateLimiter; import org.apache.lucene.store.RateLimiter.SimpleRateLimiter; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -30,7 +31,9 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; -public class RecoverySettings extends AbstractComponent { +public class RecoverySettings { + + private static final Logger logger = LogManager.getLogger(RecoverySettings.class); public static final Setting INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING = Setting.byteSizeSetting("indices.recovery.max_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB), diff --git a/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java b/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java index 5db490fbb7f27..4967c43675aa6 100644 --- a/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java +++ b/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java @@ -19,6 +19,8 @@ package org.elasticsearch.indices.store; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; @@ -34,7 +36,6 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -70,7 +71,9 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; -public class IndicesStore extends AbstractComponent implements ClusterStateListener, Closeable { +public class IndicesStore implements ClusterStateListener, Closeable { + + private static final Logger logger = LogManager.getLogger(IndicesStore.class); // TODO this class can be folded into IndicesService, and partially into IndicesClusterStateService; there is no need for a separate public service public static final Setting INDICES_STORE_DELETE_SHARD_TIMEOUT = diff --git a/server/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java b/server/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java index 16ab96be06786..69ca66f169534 100644 ---
a/server/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java +++ b/server/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java @@ -19,6 +19,8 @@ package org.elasticsearch.monitor.fs; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.util.Constants; import org.elasticsearch.cluster.ClusterInfo; @@ -26,7 +28,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.NodeEnvironment.NodePath; @@ -40,7 +41,9 @@ import java.util.Map; import java.util.Set; -public class FsProbe extends AbstractComponent { +public class FsProbe { + + private static final Logger logger = LogManager.getLogger(FsProbe.class); private final NodeEnvironment nodeEnv; diff --git a/server/src/main/java/org/elasticsearch/monitor/fs/FsService.java b/server/src/main/java/org/elasticsearch/monitor/fs/FsService.java index 66058c9f79c30..348779ff521b5 100644 --- a/server/src/main/java/org/elasticsearch/monitor/fs/FsService.java +++ b/server/src/main/java/org/elasticsearch/monitor/fs/FsService.java @@ -19,10 +19,10 @@ package org.elasticsearch.monitor.fs; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -33,7 +33,9 @@ import java.io.IOException; -public class FsService extends AbstractComponent { +public class FsService { + + private static final Logger logger = LogManager.getLogger(FsService.class); private final FsProbe probe; private final TimeValue refreshInterval; diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java b/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java index 7e00aaa7cd99c..9d7571c6eefc9 100644 --- a/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java +++ b/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java @@ -113,7 +113,7 @@ private static boolean isIdleThread(ThreadInfo threadInfo) { methodName.equals("select")) { return true; } - if (className.equals("org.elasticsearch.threadpool.ThreadPool$EstimatedTimeThread") && + if (className.equals("org.elasticsearch.threadpool.ThreadPool$CachedTimeThread") && methodName.equals("run")) { return true; } diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmService.java b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmService.java index eb6bd6f2f56b4..29b879ffe931f 100644 --- a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmService.java +++ b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmService.java @@ -19,13 +19,16 @@ package org.elasticsearch.monitor.jvm; -import org.elasticsearch.common.component.AbstractComponent; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -public class 
JvmService extends AbstractComponent { +public class JvmService { + + private static final Logger logger = LogManager.getLogger(JvmService.class); private final JvmInfo jvmInfo; diff --git a/server/src/main/java/org/elasticsearch/monitor/os/OsService.java b/server/src/main/java/org/elasticsearch/monitor/os/OsService.java index 3727b4dcd1860..57306ace25495 100644 --- a/server/src/main/java/org/elasticsearch/monitor/os/OsService.java +++ b/server/src/main/java/org/elasticsearch/monitor/os/OsService.java @@ -19,7 +19,8 @@ package org.elasticsearch.monitor.os; -import org.elasticsearch.common.component.AbstractComponent; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -29,7 +30,9 @@ import java.io.IOException; -public class OsService extends AbstractComponent { +public class OsService { + + private static final Logger logger = LogManager.getLogger(OsService.class); private final OsProbe probe; private final OsInfo info; diff --git a/server/src/main/java/org/elasticsearch/monitor/process/ProcessService.java b/server/src/main/java/org/elasticsearch/monitor/process/ProcessService.java index aba7993850af5..963d865638ca9 100644 --- a/server/src/main/java/org/elasticsearch/monitor/process/ProcessService.java +++ b/server/src/main/java/org/elasticsearch/monitor/process/ProcessService.java @@ -19,14 +19,17 @@ package org.elasticsearch.monitor.process; -import org.elasticsearch.common.component.AbstractComponent; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.SingleObjectCache; -public final class ProcessService extends AbstractComponent { +public final class ProcessService { + + private static final Logger logger = LogManager.getLogger(ProcessService.class); private final ProcessProbe probe; private final ProcessInfo info; diff --git a/server/src/main/java/org/elasticsearch/node/NodeService.java b/server/src/main/java/org/elasticsearch/node/NodeService.java index fe9c3d59d6646..0567641b8a5d6 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeService.java +++ b/server/src/main/java/org/elasticsearch/node/NodeService.java @@ -28,7 +28,6 @@ import org.elasticsearch.action.search.SearchTransportService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.discovery.Discovery; @@ -45,7 +44,7 @@ import java.io.Closeable; import java.io.IOException; -public class NodeService extends AbstractComponent implements Closeable { +public class NodeService implements Closeable { private final Settings settings; private final ThreadPool threadPool; private final MonitorService monitorService; diff --git a/server/src/main/java/org/elasticsearch/node/ResponseCollectorService.java b/server/src/main/java/org/elasticsearch/node/ResponseCollectorService.java index 8885728927b34..4f9ec9488b430 100644 --- a/server/src/main/java/org/elasticsearch/node/ResponseCollectorService.java +++ 
b/server/src/main/java/org/elasticsearch/node/ResponseCollectorService.java @@ -24,7 +24,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.ExponentiallyWeightedMovingAverage; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -42,7 +41,7 @@ * tasks executed on each node, making the EWMA of the values available to the * coordinating node. */ -public final class ResponseCollectorService extends AbstractComponent implements ClusterStateListener { +public final class ResponseCollectorService implements ClusterStateListener { private static final double ALPHA = 0.3; diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java index 1ebc8332e42d5..2adeb04e4eec0 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java @@ -19,6 +19,8 @@ package org.elasticsearch.persistent; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; @@ -29,7 +31,6 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.Assignment; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; @@ -41,7 +42,9 @@ /** * Component that runs only on the master node and is responsible for assigning running tasks to nodes */ -public class PersistentTasksClusterService extends AbstractComponent implements ClusterStateListener { +public class PersistentTasksClusterService implements ClusterStateListener { + + private static final Logger logger = LogManager.getLogger(PersistentTasksClusterService.class); private final ClusterService clusterService; private final PersistentTasksExecutorRegistry registry; diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksExecutor.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksExecutor.java index a02efb6805729..ab674a79c4e52 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksExecutor.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksExecutor.java @@ -22,7 +22,6 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.Assignment; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; import org.elasticsearch.tasks.TaskId; @@ -34,7 +33,7 @@ * An executor of tasks that can survive restart of requesting or executing node. * These tasks are using cluster state rather than only transport service to send requests and responses. 
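All of the hunks above apply the same mechanical refactoring: each class stops extending AbstractComponent, which supplied an inherited logger instance, and declares its own static per-class logger instead. A minimal sketch of the resulting pattern, using a hypothetical ExampleService as a stand-in for the real classes:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class ExampleService {   // formerly: extends AbstractComponent

    // One static logger per class, named after the class itself.
    private static final Logger logger = LogManager.getLogger(ExampleService.class);

    public void start() {
        // Parameterized messages defer string building until the level is enabled.
        logger.debug("starting [{}]", ExampleService.class.getSimpleName());
    }
}
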
*/ -public abstract class PersistentTasksExecutor extends AbstractComponent { +public abstract class PersistentTasksExecutor { private final String executor; private final String taskName; diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java index a90415b530b43..260fabc67cdca 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java @@ -18,13 +18,14 @@ */ package org.elasticsearch.persistent; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -48,7 +49,9 @@ * This component is responsible for coordination of execution of persistent tasks on individual nodes. It runs on all * non-transport client nodes in the cluster and monitors cluster state changes to detect started commands. */ -public class PersistentTasksNodeService extends AbstractComponent implements ClusterStateListener { +public class PersistentTasksNodeService implements ClusterStateListener { + + private static final Logger logger = LogManager.getLogger(PersistentTasksNodeService.class); private final Map runningTasks = new HashMap<>(); private final PersistentTasksService persistentTasksService; diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java index 96775b74ea934..301c0a21ea592 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.persistent; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; @@ -29,7 +31,6 @@ import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.node.NodeClosedException; @@ -45,7 +46,9 @@ * to the master node so that the master can update the cluster state and can keep track of the states * of the persistent tasks.
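PersistentTasksNodeService's javadoc above describes the mechanism these services share: implement ClusterStateListener and react to every published cluster state. A compact, hypothetical sketch of that contract (the real detection logic diffs task metadata between states and is considerably more involved):

import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterStateListener;

public class TaskStateWatcher implements ClusterStateListener {
    @Override
    public void clusterChanged(ClusterChangedEvent event) {
        // Only do work when cluster metadata (where task state lives) changed.
        if (event.metaDataChanged()) {
            // compare previous vs. current assignments and start/cancel tasks here
        }
    }
}
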
*/ -public class PersistentTasksService extends AbstractComponent { +public class PersistentTasksService { + + private static final Logger logger = LogManager.getLogger(PersistentTasksService.class); private static final String ACTION_ORIGIN_TRANSIENT_NAME = "action.origin"; private static final String PERSISTENT_TASK_ORIGIN = "persistent_tasks"; diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginsService.java b/server/src/main/java/org/elasticsearch/plugins/PluginsService.java index 5dfdeb09095e3..2ad9a0892b8f6 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginsService.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginsService.java @@ -33,7 +33,6 @@ import org.elasticsearch.bootstrap.JarHell; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.io.FileSystemUtils; @@ -70,7 +69,9 @@ import static org.elasticsearch.common.io.FileSystemUtils.isAccessibleDirectory; -public class PluginsService extends AbstractComponent { +public class PluginsService { + + private static final Logger logger = LogManager.getLogger(PluginsService.class); private final Settings settings; private final Path configPath; diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java index e97f7acf168f7..295b64c554f6e 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java @@ -19,6 +19,8 @@ package org.elasticsearch.repositories; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; @@ -32,7 +34,6 @@ import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; @@ -53,7 +54,9 @@ /** * Service responsible for maintaining and providing access to snapshot repositories on nodes. 
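The PersistentTasksService hunk above keeps its two origin constants, ACTION_ORIGIN_TRANSIENT_NAME ("action.origin") and PERSISTENT_TASK_ORIGIN ("persistent_tasks"). The usual shape of that pattern, sketched under the assumption that a ThreadContext is in scope (the hunk does import one); the exact helper the service uses is not shown in this hunk:

// Stash the current thread context, tag the fresh one with an origin, run the
// action, and restore the old context when the try block closes.
try (ThreadContext.StoredContext ignore = threadContext.stashContext()) {
    threadContext.putTransient("action.origin", "persistent_tasks");
    // execute the action; downstream audit/security code can read the origin back
}
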
*/ -public class RepositoriesService extends AbstractComponent implements ClusterStateApplier { +public class RepositoriesService implements ClusterStateApplier { + + private static final Logger logger = LogManager.getLogger(RepositoriesService.class); private final Map typesRegistry; diff --git a/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java b/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java index 59b79aedf959c..b0e98f3fa39b2 100644 --- a/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java @@ -21,12 +21,13 @@ import com.carrotsearch.hppc.ObjectContainer; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.repositories.RepositoriesService.VerifyResponse; @@ -46,7 +47,10 @@ import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.atomic.AtomicInteger; -public class VerifyNodeRepositoryAction extends AbstractComponent { +public class VerifyNodeRepositoryAction { + + private static final Logger logger = LogManager.getLogger(VerifyNodeRepositoryAction.class); + public static final String ACTION_NAME = "internal:admin/repository/verify"; private final TransportService transportService; diff --git a/server/src/main/java/org/elasticsearch/rest/RestController.java b/server/src/main/java/org/elasticsearch/rest/RestController.java index 85a3b0bdb4531..a55005454f629 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestController.java +++ b/server/src/main/java/org/elasticsearch/rest/RestController.java @@ -19,6 +19,8 @@ package org.elasticsearch.rest; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.client.node.NodeClient; @@ -26,7 +28,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.path.PathTrie; @@ -60,7 +61,9 @@ import static org.elasticsearch.rest.RestStatus.OK; import static org.elasticsearch.rest.BytesRestResponse.TEXT_CONTENT_TYPE; -public class RestController extends AbstractComponent implements HttpServerTransport.Dispatcher { +public class RestController implements HttpServerTransport.Dispatcher { + + private static final Logger logger = LogManager.getLogger(RestController.class); private final PathTrie handlers = new PathTrie<>(RestUtils.REST_DECODER); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryAction.java 
b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryAction.java index d1a97d74d047f..3486a5a4cb51b 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.rest.action.admin.indices; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.action.admin.indices.validate.query.QueryExplanation; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse; @@ -26,6 +27,7 @@ import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; @@ -43,6 +45,11 @@ import static org.elasticsearch.rest.RestStatus.OK; public class RestValidateQueryAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = new DeprecationLogger( + LogManager.getLogger(RestValidateQueryAction.class)); + static final String TYPES_DEPRECATION_MESSAGE = "[types removal]" + + " Specifying types in validate query requests is deprecated."; + public RestValidateQueryAction(Settings settings, RestController controller) { super(settings); controller.registerHandler(GET, "/_validate/query", this); @@ -63,7 +70,12 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC ValidateQueryRequest validateQueryRequest = new ValidateQueryRequest(Strings.splitStringByCommaToArray(request.param("index"))); validateQueryRequest.indicesOptions(IndicesOptions.fromRequest(request, validateQueryRequest.indicesOptions())); validateQueryRequest.explain(request.paramAsBoolean("explain", false)); - validateQueryRequest.types(Strings.splitStringByCommaToArray(request.param("type"))); + + if (request.hasParam("type")) { + deprecationLogger.deprecated(TYPES_DEPRECATION_MESSAGE); + validateQueryRequest.types(Strings.splitStringByCommaToArray(request.param("type"))); + } + validateQueryRequest.rewrite(request.paramAsBoolean("rewrite", false)); validateQueryRequest.allShards(request.paramAsBoolean("all_shards", false)); diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestCountAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestCountAction.java similarity index 88% rename from server/src/main/java/org/elasticsearch/rest/action/document/RestCountAction.java rename to server/src/main/java/org/elasticsearch/rest/action/search/RestCountAction.java index a66b17cc5a5e5..99ea1c81fa956 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/document/RestCountAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestCountAction.java @@ -17,13 +17,15 @@ * under the License. 
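The RestValidateQueryAction hunk above sets the pattern that the count, multi-search, and search handlers below repeat: read the type parameter only when the client actually sent it, and log a deprecation warning before honoring it. Condensed from that hunk (validateQueryRequest and the message constant are defined there):

if (request.hasParam("type")) {
    // Emit a deprecation warning; Elasticsearch also surfaces these to clients
    // as a "Warning" response header.
    deprecationLogger.deprecated(TYPES_DEPRECATION_MESSAGE);
    validateQueryRequest.types(Strings.splitStringByCommaToArray(request.param("type")));
}

Guarding on hasParam matters because it keeps requests that never mention types from triggering the warning at all.
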
*/ -package org.elasticsearch.rest.action.document; +package org.elasticsearch.rest.action.search; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.QueryBuilder; @@ -44,6 +46,11 @@ import static org.elasticsearch.search.internal.SearchContext.DEFAULT_TERMINATE_AFTER; public class RestCountAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = new DeprecationLogger( + LogManager.getLogger(RestCountAction.class)); + static final String TYPES_DEPRECATION_MESSAGE = "[types removal]" + + " Specifying types in count requests is deprecated."; + public RestCountAction(Settings settings, RestController controller) { super(settings); controller.registerHandler(POST, "/_count", this); @@ -80,7 +87,12 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC if (minScore != -1f) { searchSourceBuilder.minScore(minScore); } - countRequest.types(Strings.splitStringByCommaToArray(request.param("type"))); + + if (request.hasParam("type")) { + deprecationLogger.deprecated(TYPES_DEPRECATION_MESSAGE); + countRequest.types(Strings.splitStringByCommaToArray(request.param("type"))); + } + countRequest.preference(request.param("preference")); final int terminateAfter = request.paramAsInt("terminate_after", DEFAULT_TERMINATE_AFTER); diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java index 6239015dae418..d3a45fa727b26 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.rest.action.search; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.action.search.MultiSearchRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.IndicesOptions; @@ -27,6 +28,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentParser; @@ -46,9 +48,13 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; public class RestMultiSearchAction extends BaseRestHandler { - private static final Set RESPONSE_PARAMS = Collections.singleton(RestSearchAction.TYPED_KEYS_PARAM); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger( + LogManager.getLogger(RestMultiSearchAction.class)); + static final String TYPES_DEPRECATION_MESSAGE = "[types removal]" + + " Specifying types in multi search requests is deprecated."; + private final boolean allowExplicitIndex; public RestMultiSearchAction(Settings settings, RestController controller) { @@ -96,6 +102,9 @@ public static MultiSearchRequest parseRequest(RestRequest restRequest, boolean a } 
parseMultiLineRequest(restRequest, multiRequest.indicesOptions(), allowExplicitIndex, (searchRequest, parser) -> { + if (searchRequest.types().length > 0) { + deprecationLogger.deprecated(TYPES_DEPRECATION_MESSAGE); + } searchRequest.source(SearchSourceBuilder.fromXContent(parser, false)); multiRequest.add(searchRequest); }); diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java index 0a0bf1b6e38eb..60fd77e46aa3f 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java @@ -54,10 +54,12 @@ import static org.elasticsearch.search.suggest.SuggestBuilders.termSuggestion; public class RestSearchAction extends BaseRestHandler { - public static final String TYPED_KEYS_PARAM = "typed_keys"; private static final Set RESPONSE_PARAMS = Collections.singleton(TYPED_KEYS_PARAM); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(RestSearchAction.class)); + static final String TYPES_DEPRECATION_MESSAGE = "[types removal]" + + " Specifying types in search requests is deprecated."; public RestSearchAction(Settings settings, RestController controller) { super(settings); @@ -150,11 +152,10 @@ public static void parseSearchRequest(SearchRequest searchRequest, RestRequest r searchRequest.scroll(new Scroll(parseTimeValue(scroll, null, "scroll"))); } - String types = request.param("type"); - if (types != null) { - deprecationLogger.deprecated("The {index}/{type}/_search endpoint is deprecated, use {index}/_search instead"); + if (request.hasParam("type")) { + deprecationLogger.deprecated(TYPES_DEPRECATION_MESSAGE); + searchRequest.types(Strings.splitStringByCommaToArray(request.param("type"))); } - searchRequest.types(Strings.splitStringByCommaToArray(types)); searchRequest.routing(request.param("routing")); searchRequest.preference(request.param("preference")); searchRequest.indicesOptions(IndicesOptions.fromRequest(request, searchRequest.indicesOptions())); diff --git a/server/src/main/java/org/elasticsearch/script/BucketAggregationScript.java b/server/src/main/java/org/elasticsearch/script/BucketAggregationScript.java index 5fa8d1fbf94a4..76ff776353ef2 100644 --- a/server/src/main/java/org/elasticsearch/script/BucketAggregationScript.java +++ b/server/src/main/java/org/elasticsearch/script/BucketAggregationScript.java @@ -46,7 +46,7 @@ public Map getParams() { return params; } - public abstract Double execute(); + public abstract Number execute(); public interface Factory { BucketAggregationScript newInstance(Map params); diff --git a/server/src/main/java/org/elasticsearch/script/ScriptService.java b/server/src/main/java/org/elasticsearch/script/ScriptService.java index 7c4011a63c412..6eea53a9b7dd0 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptService.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptService.java @@ -19,6 +19,8 @@ package org.elasticsearch.script; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; @@ -39,7 +41,6 @@ import org.elasticsearch.common.cache.RemovalListener; import org.elasticsearch.common.cache.RemovalNotification; import 
org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -58,7 +59,9 @@ import java.util.Set; import java.util.function.Function; -public class ScriptService extends AbstractComponent implements Closeable, ClusterStateApplier { +public class ScriptService implements Closeable, ClusterStateApplier { + + private static final Logger logger = LogManager.getLogger(ScriptService.class); static final String DISABLE_DYNAMIC_SCRIPTING_SETTING = "script.disable_dynamic"; diff --git a/server/src/main/java/org/elasticsearch/search/SearchHit.java b/server/src/main/java/org/elasticsearch/search/SearchHit.java index 7eb1139705dfc..4532385f31381 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchHit.java +++ b/server/src/main/java/org/elasticsearch/search/SearchHit.java @@ -183,7 +183,11 @@ public String getId() { /** * The type of the document. + * + * @deprecated Types are in the process of being removed. Instead of using a type, prefer to + * filter on a field on the document. */ + @Deprecated public String getType() { return type != null ? type.string() : null; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java index e5ecbd6d00e20..6b5329e0a70b4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java @@ -24,6 +24,8 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.search.BoostQuery; +import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.PointRangeQuery; import org.apache.lucene.search.Query; @@ -178,8 +180,19 @@ public void collect(int doc, long bucket) throws IOException { }; } + static Query extractQuery(Query query) { + if (query instanceof BoostQuery) { + return extractQuery(((BoostQuery) query).getQuery()); + } else if (query instanceof IndexOrDocValuesQuery) { + return extractQuery(((IndexOrDocValuesQuery) query).getIndexQuery()); + } else { + return query; + } + } + @Override SortedDocsProducer createSortedDocsProducerOrNull(IndexReader reader, Query query) { + query = extractQuery(query); if (checkIfSortedDocsIsApplicable(reader, fieldType) == false || (query != null && query.getClass() != MatchAllDocsQuery.class && diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptPipelineAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptPipelineAggregator.java index bd9371815faae..d19425e3e0359 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptPipelineAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptPipelineAggregator.java @@ -108,13 +108,13 @@ public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext if (skipBucket) { newBuckets.add(bucket); } else { - Double returned = factory.newInstance(vars).execute(); + Number returned = 
factory.newInstance(vars).execute(); if (returned == null) { newBuckets.add(bucket); } else { final List aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false).map( (p) -> (InternalAggregation) p).collect(Collectors.toList()); - aggs.add(new InternalSimpleValue(name(), returned, formatter, new ArrayList<>(), metaData())); + aggs.add(new InternalSimpleValue(name(), returned.doubleValue(), formatter, new ArrayList<>(), metaData())); InternalMultiBucketAggregation.InternalBucket newBucket = originalAgg.createBucket(new InternalAggregations(aggs), bucket); newBuckets.add(newBucket); diff --git a/server/src/main/java/org/elasticsearch/search/profile/ProfileResult.java b/server/src/main/java/org/elasticsearch/search/profile/ProfileResult.java index d0965bca1a9b2..e54639bdb764a 100644 --- a/server/src/main/java/org/elasticsearch/search/profile/ProfileResult.java +++ b/server/src/main/java/org/elasticsearch/search/profile/ProfileResult.java @@ -196,9 +196,9 @@ public static ProfileResult fromXContent(XContentParser parser) throws IOExcepti } else if (token == XContentParser.Token.START_OBJECT) { if (BREAKDOWN.match(currentFieldName, parser.getDeprecationHandler())) { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - ensureExpectedToken(parser.currentToken(), XContentParser.Token.FIELD_NAME, parser::getTokenLocation); + ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser::getTokenLocation); String name = parser.currentName(); - ensureExpectedToken(parser.nextToken(), XContentParser.Token.VALUE_NUMBER, parser::getTokenLocation); + ensureExpectedToken(XContentParser.Token.VALUE_NUMBER, parser.nextToken(), parser::getTokenLocation); long value = parser.longValue(); timings.put(name, value); } diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java index eaadd399abbca..60ad277deb2f7 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java @@ -61,7 +61,7 @@ protected Suggest.Suggestion> contexts = Collections.emptyMap(); + Map> contexts = Collections.emptyMap(); if (fieldType.hasContextMappings()) { List rawContexts = collector.getContexts(suggestDoc.doc); if (rawContexts.size() > 0) { diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java index 12ba58a2edee2..5dea2d8c1a0ef 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java @@ -280,13 +280,13 @@ public static Entry fromXContent(XContentParser parser) { } public static class Option extends Suggest.Suggestion.Entry.Option { - private Map> contexts = Collections.emptyMap(); + private Map> contexts = Collections.emptyMap(); private ScoreDoc doc; private SearchHit hit; public static final ParseField CONTEXTS = new ParseField("contexts"); - public Option(int docID, Text text, float score, Map> contexts) { + public Option(int docID, Text text, float score, Map> contexts) { super(text, score); this.doc = new ScoreDoc(docID, score); this.contexts = Objects.requireNonNull(contexts, "context map cannot be null"); @@ -307,7 
+307,7 @@ public Option(StreamInput in) throws IOException { for (int i = 0; i < contextSize; i++) { String contextName = in.readString(); int nContexts = in.readVInt(); - Set contexts = new HashSet<>(nContexts); + Set contexts = new HashSet<>(nContexts); for (int j = 0; j < nContexts; j++) { contexts.add(in.readString()); } @@ -322,7 +322,7 @@ protected void mergeInto(Suggest.Suggestion.Entry.Option otherOption) { throw new UnsupportedOperationException(); } - public Map> getContexts() { + public Map> getContexts() { return contexts; } @@ -352,7 +352,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } if (contexts.size() > 0) { builder.startObject(CONTEXTS.getPreferredName()); - for (Map.Entry> entry : contexts.entrySet()) { + for (Map.Entry> entry : contexts.entrySet()) { builder.startArray(entry.getKey()); for (CharSequence context : entry.getValue()) { builder.value(context.toString()); @@ -377,13 +377,13 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws (p,c) -> parseContexts(p), CompletionSuggestion.Entry.Option.CONTEXTS); } - private static Map> parseContexts(XContentParser parser) throws IOException { - Map> contexts = new HashMap<>(); + private static Map> parseContexts(XContentParser parser) throws IOException { + Map> contexts = new HashMap<>(); while((parser.nextToken()) != XContentParser.Token.END_OBJECT) { ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser::getTokenLocation); String key = parser.currentName(); ensureExpectedToken(XContentParser.Token.START_ARRAY, parser.nextToken(), parser::getTokenLocation); - Set values = new HashSet<>(); + Set values = new HashSet<>(); while((parser.nextToken()) != XContentParser.Token.END_ARRAY) { ensureExpectedToken(XContentParser.Token.VALUE_STRING, parser.currentToken(), parser::getTokenLocation); values.add(parser.text()); @@ -399,7 +399,7 @@ public static Option fromXContent(XContentParser parser) { Text text = new Text((String) values.get(Suggestion.Entry.Option.TEXT.getPreferredName())); Float score = (Float) values.get(Suggestion.Entry.Option.SCORE.getPreferredName()); @SuppressWarnings("unchecked") - Map> contexts = (Map>) values + Map> contexts = (Map>) values .get(CompletionSuggestion.Entry.Option.CONTEXTS.getPreferredName()); if (contexts == null) { contexts = Collections.emptyMap(); @@ -427,7 +427,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(false); } out.writeInt(contexts.size()); - for (Map.Entry> entry : contexts.entrySet()) { + for (Map.Entry> entry : contexts.entrySet()) { out.writeString(entry.getKey()); out.writeVInt(entry.getValue().size()); for (CharSequence ctx : entry.getValue()) { @@ -444,7 +444,7 @@ public String toString() { stringBuilder.append(" score:"); stringBuilder.append(getScore()); stringBuilder.append(" context:["); - for (Map.Entry> entry: contexts.entrySet()) { + for (Map.Entry> entry: contexts.entrySet()) { stringBuilder.append(" "); stringBuilder.append(entry.getKey()); stringBuilder.append(":"); diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/CategoryContextMapping.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/CategoryContextMapping.java index 073e7da3accb2..d935bc1050ecd 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/CategoryContextMapping.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/CategoryContextMapping.java @@ 
-111,9 +111,9 @@ protected XContentBuilder toInnerXContent(XContentBuilder builder, Params params * */ @Override - public Set parseContext(ParseContext parseContext, XContentParser parser) + public Set parseContext(ParseContext parseContext, XContentParser parser) throws IOException, ElasticsearchParseException { - final Set contexts = new HashSet<>(); + final Set contexts = new HashSet<>(); Token token = parser.currentToken(); if (token == Token.VALUE_STRING || token == Token.VALUE_NUMBER || token == Token.VALUE_BOOLEAN) { contexts.add(parser.text()); @@ -134,8 +134,8 @@ public Set parseContext(ParseContext parseContext, XContentParser } @Override - public Set parseContext(Document document) { - Set values = null; + public Set parseContext(Document document) { + Set values = null; if (fieldName != null) { IndexableField[] fields = document.getFields(fieldName); values = new HashSet<>(fields.length); diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMapping.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMapping.java index 0d0c7e9458910..ba24e8eb6aa1a 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMapping.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMapping.java @@ -95,12 +95,12 @@ public String name() { /** * Parses a set of index-time contexts. */ - public abstract Set parseContext(ParseContext parseContext, XContentParser parser) throws IOException, ElasticsearchParseException; + public abstract Set parseContext(ParseContext parseContext, XContentParser parser) throws IOException, ElasticsearchParseException; /** * Retrieves a set of context from a document at index-time. */ - protected abstract Set parseContext(ParseContext.Document document); + protected abstract Set parseContext(ParseContext.Document document); /** * Prototype for the query context diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMappings.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMappings.java index b4c3276b946b2..8ff446d1c5de6 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMappings.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMappings.java @@ -22,6 +22,7 @@ import org.apache.lucene.search.suggest.document.CompletionQuery; import org.apache.lucene.search.suggest.document.ContextQuery; import org.apache.lucene.search.suggest.document.ContextSuggestField; +import org.apache.lucene.util.CharsRef; import org.apache.lucene.util.CharsRefBuilder; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; @@ -94,7 +95,7 @@ public ContextMapping get(String name) { * Adds a context-enabled field for all the defined mappings to document * see {@link org.elasticsearch.search.suggest.completion.context.ContextMappings.TypedContextField} */ - public void addField(ParseContext.Document document, String name, String input, int weight, Map> contexts) { + public void addField(ParseContext.Document document, String name, String input, int weight, Map> contexts) { document.add(new TypedContextField(name, input, weight, contexts, document)); } @@ -121,10 +122,10 @@ public Iterator> iterator() { * at index time */ private class TypedContextField extends ContextSuggestField { - private final Map> contexts; + private final Map> contexts; private final 
ParseContext.Document document; - TypedContextField(String name, String value, int weight, Map> contexts, + TypedContextField(String name, String value, int weight, Map> contexts, ParseContext.Document document) { super(name, value, weight); this.contexts = contexts; @@ -133,18 +134,18 @@ private class TypedContextField extends ContextSuggestField { @Override protected Iterable contexts() { - Set typedContexts = new HashSet<>(); + Set typedContexts = new HashSet<>(); final CharsRefBuilder scratch = new CharsRefBuilder(); scratch.grow(1); for (int typeId = 0; typeId < contextMappings.size(); typeId++) { scratch.setCharAt(0, (char) typeId); scratch.setLength(1); ContextMapping mapping = contextMappings.get(typeId); - Set contexts = new HashSet<>(mapping.parseContext(document)); + Set contexts = new HashSet<>(mapping.parseContext(document)); if (this.contexts.get(mapping.name()) != null) { contexts.addAll(this.contexts.get(mapping.name())); } - for (CharSequence context : contexts) { + for (String context : contexts) { scratch.append(context); typedContexts.add(scratch.toCharsRef()); scratch.setLength(1); @@ -153,7 +154,7 @@ protected Iterable contexts() { if (typedContexts.isEmpty()) { throw new IllegalArgumentException("Contexts are mandatory in context enabled completion field [" + name + "]"); } - return typedContexts; + return new ArrayList(typedContexts); } } @@ -198,18 +199,18 @@ public ContextQuery toContextQuery(CompletionQuery query, Map> getNamedContexts(List contexts) { - Map> contextMap = new HashMap<>(contexts.size()); + public Map> getNamedContexts(List contexts) { + Map> contextMap = new HashMap<>(contexts.size()); for (CharSequence typedContext : contexts) { int typeId = typedContext.charAt(0); assert typeId < contextMappings.size() : "Returned context has invalid type"; ContextMapping mapping = contextMappings.get(typeId); - Set contextEntries = contextMap.get(mapping.name()); + Set contextEntries = contextMap.get(mapping.name()); if (contextEntries == null) { contextEntries = new HashSet<>(); contextMap.put(mapping.name(), contextEntries); } - contextEntries.add(typedContext.subSequence(1, typedContext.length())); + contextEntries.add(typedContext.subSequence(1, typedContext.length()).toString()); } return contextMap; } diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java index b908fadd55845..9f01c9a15b5a5 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java @@ -144,14 +144,14 @@ protected XContentBuilder toInnerXContent(XContentBuilder builder, Params params * see {@code GeoPoint(String)} for GEO POINT */ @Override - public Set parseContext(ParseContext parseContext, XContentParser parser) throws IOException, ElasticsearchParseException { + public Set parseContext(ParseContext parseContext, XContentParser parser) throws IOException, ElasticsearchParseException { if (fieldName != null) { MappedFieldType fieldType = parseContext.mapperService().fullName(fieldName); if (!(fieldType instanceof GeoPointFieldMapper.GeoPointFieldType)) { throw new ElasticsearchParseException("referenced field must be mapped to geo_point"); } } - final Set contexts = new HashSet<>(); + final Set contexts = new HashSet<>(); Token token = parser.currentToken(); if (token == Token.START_ARRAY) 
{ token = parser.nextToken(); @@ -178,7 +178,7 @@ public Set parseContext(ParseContext parseContext, XContentParser } else if (token == Token.VALUE_STRING) { final String geoHash = parser.text(); final CharSequence truncatedGeoHash = geoHash.subSequence(0, Math.min(geoHash.length(), precision)); - contexts.add(truncatedGeoHash); + contexts.add(truncatedGeoHash.toString()); } else { // or a single location GeoPoint point = GeoUtils.parseGeoPoint(parser); @@ -188,8 +188,8 @@ public Set parseContext(ParseContext parseContext, XContentParser } @Override - public Set parseContext(Document document) { - final Set geohashes = new HashSet<>(); + public Set parseContext(Document document) { + final Set geohashes = new HashSet<>(); if (fieldName != null) { IndexableField[] fields = document.getFields(fieldName); @@ -222,10 +222,10 @@ public Set parseContext(Document document) { } } - Set locations = new HashSet<>(); - for (CharSequence geohash : geohashes) { + Set locations = new HashSet<>(); + for (String geohash : geohashes) { int precision = Math.min(this.precision, geohash.length()); - CharSequence truncatedGeohash = geohash.subSequence(0, precision); + String truncatedGeohash = geohash.substring(0, precision); locations.add(truncatedGeohash); } return locations; diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index 7e1873c3f0a6e..34046c205afcb 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -22,6 +22,7 @@ import com.carrotsearch.hppc.IntSet; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.Version; @@ -56,7 +57,6 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.regex.Regex; @@ -115,7 +115,9 @@ * which removes {@link RestoreInProgress} when all shards are completed. In case of * restore failure a normal recovery fail-over process kicks in. 
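The completion-suggester hunks above (CompletionSuggestion, CategoryContextMapping, ContextMappings, GeoContextMapping) all replace CharSequence with String in the context sets and maps. A plausible motivation, shown as a runnable sketch: CharSequence defines no equality contract across implementations, so hash-based collections of mixed CharSequence values behave unpredictably, while Strings compare by content:

import java.util.HashSet;
import java.util.Set;

public class ContextEquality {
    public static void main(String[] args) {
        Set<CharSequence> mixed = new HashSet<>();
        mixed.add("u33dbf");                      // a String geohash
        mixed.add(new StringBuilder("u33dbf"));   // same text, different class
        System.out.println(mixed.size());         // 2 -- not deduplicated

        Set<String> uniform = new HashSet<>();
        uniform.add("u33dbf");
        uniform.add(new StringBuilder("u33dbf").toString());
        System.out.println(uniform.size());       // 1 -- content equality applies
    }
}

The same concern explains why GeoContextMapping now truncates geohashes with substring(0, precision), which returns a String, instead of subSequence.
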
*/ -public class RestoreService extends AbstractComponent implements ClusterStateApplier { +public class RestoreService implements ClusterStateApplier { + + private static final Logger logger = LogManager.getLogger(RestoreService.class); private static final Set UNMODIFIABLE_SETTINGS = unmodifiableSet(newHashSet( SETTING_NUMBER_OF_SHARDS, diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskManager.java b/server/src/main/java/org/elasticsearch/tasks/TaskManager.java index bb93eac6af8ff..92c86a04cdb5d 100644 --- a/server/src/main/java/org/elasticsearch/tasks/TaskManager.java +++ b/server/src/main/java/org/elasticsearch/tasks/TaskManager.java @@ -19,6 +19,8 @@ package org.elasticsearch.tasks; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchTimeoutException; @@ -29,7 +31,6 @@ import org.elasticsearch.cluster.ClusterStateApplier; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; @@ -56,7 +57,10 @@ /** * Task Manager service for keeping track of currently running tasks on the nodes */ -public class TaskManager extends AbstractComponent implements ClusterStateApplier { +public class TaskManager implements ClusterStateApplier { + + private static final Logger logger = LogManager.getLogger(TaskManager.class); + private static final TimeValue WAIT_FOR_COMPLETION_POLL = timeValueMillis(100); /** Rest headers that are copied to the task */ diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java b/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java index 86c546370b704..8a3d237b443f1 100644 --- a/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java +++ b/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.tasks; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; @@ -34,7 +36,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; @@ -52,7 +53,9 @@ /** * Service that can store task results. 
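Returning to the scripting hunks earlier in this diff: BucketAggregationScript#execute now returns Number rather than Double, and BucketScriptPipelineAggregator widens the result itself with doubleValue(). A two-line sketch of the effect (the values here are hypothetical):

Number returned = 2 + 3;               // integer script results autobox to Integer
double value = returned.doubleValue(); // 5.0 -- widened uniformly at the call site

This lets bucket scripts that produce integer or long arithmetic work without the script engine having to coerce every result to Double first.
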
*/ -public class TaskResultsService extends AbstractComponent { +public class TaskResultsService { + + private static final Logger logger = LogManager.getLogger(TaskResultsService.class); public static final String TASK_INDEX = ".tasks"; diff --git a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java index 91d5d9fa3717e..8acfba5b5a7ef 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java @@ -19,11 +19,12 @@ package org.elasticsearch.threadpool; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.util.Counter; import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -61,7 +62,9 @@ import static java.util.Collections.unmodifiableMap; -public class ThreadPool extends AbstractComponent implements Scheduler, Closeable { +public class ThreadPool implements Scheduler, Closeable { + + private static final Logger logger = LogManager.getLogger(ThreadPool.class); public static class Names { public static final String SAME = "same"; diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java index fdb3745d130dc..8da433fdc6c8e 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.SettingUpgrader; @@ -54,7 +53,7 @@ /** * Base class for all services and components that need up-to-date information about the registered remote clusters */ -public abstract class RemoteClusterAware extends AbstractComponent { +public abstract class RemoteClusterAware { static { // remove search.remote.* settings in 8.0.0 diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java index c1961b266cb7d..c78954a8f5337 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.transport; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.SetOnce; @@ -33,7 +35,6 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.common.transport.TransportAddress; @@ -81,7 +82,9 @@ * {@link RemoteClusterService#REMOTE_CONNECTIONS_PER_CLUSTER} until either all eligible nodes are exhausted or the maximum number of * connections per cluster has been reached. */ -final class RemoteClusterConnection extends AbstractComponent implements TransportConnectionListener, Closeable { +final class RemoteClusterConnection implements TransportConnectionListener, Closeable { + + private static final Logger logger = LogManager.getLogger(RemoteClusterConnection.class); private final TransportService transportService; private final ConnectionManager connectionManager; diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index de0e550f2f736..7382f345c46ef 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -19,6 +19,8 @@ package org.elasticsearch.transport; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.OriginalIndices; @@ -67,6 +69,8 @@ */ public final class RemoteClusterService extends RemoteClusterAware implements Closeable { + private static final Logger logger = LogManager.getLogger(RemoteClusterService.class); + static { // remove search.remote.* settings in 8.0.0 assert Version.CURRENT.major < 8; diff --git a/server/src/test/java/org/elasticsearch/action/RejectionActionIT.java b/server/src/test/java/org/elasticsearch/action/RejectionActionIT.java index b45449425cb24..ad2447cb7b3d0 100644 --- a/server/src/test/java/org/elasticsearch/action/RejectionActionIT.java +++ b/server/src/test/java/org/elasticsearch/action/RejectionActionIT.java @@ -87,7 +87,8 @@ public void onFailure(Exception e) { if (response instanceof SearchResponse) { SearchResponse searchResponse = (SearchResponse) response; for (ShardSearchFailure failure : searchResponse.getShardFailures()) { - assertTrue("got unexpected reason..." + failure.reason(), failure.reason().toLowerCase(Locale.ENGLISH).contains("rejected")); + assertTrue("got unexpected reason..." + failure.reason(), + failure.reason().toLowerCase(Locale.ENGLISH).contains("rejected")); } } else { Exception t = (Exception) response; @@ -95,7 +96,8 @@ public void onFailure(Exception e) { if (unwrap instanceof SearchPhaseExecutionException) { SearchPhaseExecutionException e = (SearchPhaseExecutionException) unwrap; for (ShardSearchFailure failure : e.shardFailures()) { - assertTrue("got unexpected reason..." + failure.reason(), failure.reason().toLowerCase(Locale.ENGLISH).contains("rejected")); + assertTrue("got unexpected reason..." 
+ failure.reason(), + failure.reason().toLowerCase(Locale.ENGLISH).contains("rejected")); } } else if ((unwrap instanceof EsRejectedExecutionException) == false) { throw new AssertionError("unexpected failure", (Throwable) response); diff --git a/server/src/test/java/org/elasticsearch/action/admin/HotThreadsIT.java b/server/src/test/java/org/elasticsearch/action/admin/HotThreadsIT.java index 5d51253882173..7c00705b2a28e 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/HotThreadsIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/HotThreadsIT.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsResponse; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESIntegTestCase; +import org.hamcrest.Matcher; import java.util.Map; import java.util.concurrent.CountDownLatch; @@ -38,7 +39,9 @@ import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.Matchers.not; public class HotThreadsIT extends ESIntegTestCase { @@ -117,7 +120,8 @@ public void onFailure(Exception e) { assertHitCount( client().prepareSearch() .setQuery(matchAllQuery()) - .setPostFilter(boolQuery().must(matchAllQuery()).mustNot(boolQuery().must(termQuery("field1", "value1")).must(termQuery("field1", "value2")))) + .setPostFilter(boolQuery().must(matchAllQuery()).mustNot(boolQuery() + .must(termQuery("field1", "value1")).must(termQuery("field1", "value2")))) .get(), 3L); } @@ -135,9 +139,13 @@ public void testIgnoreIdleThreads() throws ExecutionException, InterruptedExcept builder.setThreads(Integer.MAX_VALUE); NodesHotThreadsResponse response = builder.execute().get(); + final Matcher containsCachedTimeThreadRunMethod + = containsString("org.elasticsearch.threadpool.ThreadPool$CachedTimeThread.run"); + int totSizeAll = 0; for (NodeHotThreads node : response.getNodesMap().values()) { totSizeAll += node.getHotThreads().length(); + assertThat(node.getHotThreads(), containsCachedTimeThreadRunMethod); } // Second time, do ignore idle threads: @@ -151,6 +159,7 @@ public void testIgnoreIdleThreads() throws ExecutionException, InterruptedExcept int totSizeIgnoreIdle = 0; for (NodeHotThreads node : response.getNodesMap().values()) { totSizeIgnoreIdle += node.getHotThreads().length(); + assertThat(node.getHotThreads(), not(containsCachedTimeThreadRunMethod)); } // The filtered stacks should be smaller than unfiltered ones: diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponsesTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponsesTests.java index 8c1438815250a..012f801698f96 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponsesTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponsesTests.java @@ -70,7 +70,8 @@ public void testClusterHealth() throws IOException { int inFlight = randomIntBetween(0, 200); int delayedUnassigned = randomIntBetween(0, 200); TimeValue pendingTaskInQueueTime = TimeValue.timeValueMillis(randomIntBetween(1000, 100000)); - ClusterHealthResponse clusterHealth = new ClusterHealthResponse("bla", new String[] {MetaData.ALL}, clusterState, pendingTasks, inFlight, delayedUnassigned, pendingTaskInQueueTime); + 
ClusterHealthResponse clusterHealth = new ClusterHealthResponse("bla", new String[] {MetaData.ALL}, + clusterState, pendingTasks, inFlight, delayedUnassigned, pendingTaskInQueueTime); clusterHealth = maybeSerialize(clusterHealth); assertClusterHealth(clusterHealth); assertThat(clusterHealth.getNumberOfPendingTasks(), Matchers.equalTo(pendingTasks)); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java index 43d94f56e5af3..1d22e4b6c6e6a 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java @@ -66,7 +66,8 @@ public void testVerifyRepositoryWithBlocks() { // This test checks that the Get Repository operation is never blocked, even if the cluster is read only. try { setClusterReadOnly(true); - VerifyRepositoryResponse response = client().admin().cluster().prepareVerifyRepository("test-repo-blocks").execute().actionGet(); + VerifyRepositoryResponse response = client().admin().cluster() + .prepareVerifyRepository("test-repo-blocks").execute().actionGet(); assertThat(response.getNodes().size(), equalTo(cluster().numDataAndMasterNodes())); } finally { setClusterReadOnly(false); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdaterTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdaterTests.java index 5fcd369a8a433..c345e34d20c3a 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdaterTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdaterTests.java @@ -57,8 +57,14 @@ public void testUpdateSetting() { .transientSettings(Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 3.5) .put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 4.5).build()); ClusterState build = builder.metaData(metaData).build(); - ClusterState clusterState = updater.updateSettings(build, Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 0.5).build(), - Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 0.4).build(), logger); + ClusterState clusterState = updater.updateSettings(build, + Settings.builder() + .put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 0.5) + .build(), + Settings.builder() + .put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 0.4) + .build(), + logger); assertNotSame(clusterState, build); assertEquals(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.get(clusterState.metaData().persistentSettings()), 0.4, 0.1); assertEquals(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.get(clusterState.metaData().persistentSettings()), 2.5, 0.1); @@ -73,7 +79,12 @@ public void testUpdateSetting() { assertFalse(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.exists(clusterState.metaData().transientSettings())); clusterState = updater.updateSettings(clusterState, - Settings.EMPTY, Settings.builder().putNull("cluster.routing.*").put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 10.0).build(), logger); + Settings.EMPTY, + Settings.builder() + .putNull("cluster.routing.*") + .put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 10.0) + .build(), + logger); 
assertEquals(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.get(clusterState.metaData().persistentSettings()), 10.0, 0.1); assertFalse(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.exists(clusterState.metaData().persistentSettings())); @@ -99,8 +110,15 @@ public void testAllOrNothing() { ClusterState build = builder.metaData(metaData).build(); try { - updater.updateSettings(build, Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), "not a float").build(), - Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), "not a float").put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 1.0f).build(), logger); + updater.updateSettings(build, + Settings.builder() + .put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), "not a float") + .build(), + Settings.builder() + .put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), "not a float") + .put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 1.0f) + .build(), + logger); fail("all or nothing"); } catch (IllegalArgumentException ex) { logger.info("", ex); @@ -119,14 +137,29 @@ public void testClusterBlock() { settingsService.addSettingsUpdateConsumer(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING, shard::set); SettingsUpdater updater = new SettingsUpdater(settingsService); MetaData.Builder metaData = MetaData.builder() - .persistentSettings(Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 1.5) - .put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 2.5).build()) - .transientSettings(Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 3.5) - .put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 4.5).build()); + .persistentSettings( + Settings.builder() + .put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 1.5) + .put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 2.5) + .build() + ) + .transientSettings( + Settings.builder() + .put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 3.5) + .put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 4.5) + .build() + ); ClusterState build = builder.metaData(metaData).build(); - ClusterState clusterState = updater.updateSettings(build, Settings.builder().put(MetaData.SETTING_READ_ONLY_SETTING.getKey(), true).build(), - Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 1.6).put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 1.0f).build(), logger); + ClusterState clusterState = updater.updateSettings(build, + Settings.builder() + .put(MetaData.SETTING_READ_ONLY_SETTING.getKey(), true) + .build(), + Settings.builder() + .put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 1.6) + .put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 1.0f) + .build(), + logger); assertEquals(clusterState.blocks().global().size(), 1); assertEquals(clusterState.blocks().global().iterator().next(), MetaData.CLUSTER_READ_ONLY_BLOCK); @@ -135,8 +168,15 @@ public void testClusterBlock() { assertEquals(clusterState.blocks().global().size(), 0); - clusterState = updater.updateSettings(build, Settings.builder().put(MetaData.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.getKey(), true).build(), - Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 1.6).put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 1.0f).build(), logger); + 
clusterState = updater.updateSettings(build, + Settings.builder() + .put(MetaData.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.getKey(), true) + .build(), + Settings.builder() + .put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 1.6) + .put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 1.0f) + .build(), + logger); assertEquals(clusterState.blocks().global().size(), 1); assertEquals(clusterState.blocks().global().iterator().next(), MetaData.CLUSTER_READ_ONLY_ALLOW_DELETE_BLOCK); clusterState = updater.updateSettings(build, Settings.EMPTY, diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java index dd875fbc4980a..f51c2b7b172c4 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java @@ -88,7 +88,8 @@ public void testCreateSnapshotWithBlocks() { logger.info("--> creating a snapshot is allowed when the cluster is read only"); try { setClusterReadOnly(true); - assertThat(client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-1").setWaitForCompletion(true).get().status(), equalTo(RestStatus.OK)); + assertThat(client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-1") + .setWaitForCompletion(true).get().status(), equalTo(RestStatus.OK)); } finally { setClusterReadOnly(false); } @@ -104,7 +105,8 @@ public void testCreateSnapshotWithIndexBlocks() { logger.info("--> creating a snapshot is not blocked when an index is read only"); try { enableIndexBlock(INDEX_NAME, SETTING_READ_ONLY); - assertThat(client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-1").setIndices(COMMON_INDEX_NAME_MASK).setWaitForCompletion(true).get().status(), equalTo(RestStatus.OK)); + assertThat(client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-1") + .setIndices(COMMON_INDEX_NAME_MASK).setWaitForCompletion(true).get().status(), equalTo(RestStatus.OK)); } finally { disableIndexBlock(INDEX_NAME, SETTING_READ_ONLY); } @@ -112,9 +114,11 @@ public void testCreateSnapshotWithIndexBlocks() { logger.info("--> creating a snapshot is blocked when an index is blocked for reads"); try { enableIndexBlock(INDEX_NAME, SETTING_BLOCKS_READ); - assertBlocked(client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-2").setIndices(COMMON_INDEX_NAME_MASK), IndexMetaData.INDEX_READ_BLOCK); + assertBlocked(client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-2") + .setIndices(COMMON_INDEX_NAME_MASK), IndexMetaData.INDEX_READ_BLOCK); logger.info("--> creating a snapshot is not blocked when an read-blocked index is not part of the snapshot"); - assertThat(client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-2").setIndices(OTHER_INDEX_NAME).setWaitForCompletion(true).get().status(), equalTo(RestStatus.OK)); + assertThat(client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-2") + .setIndices(OTHER_INDEX_NAME).setWaitForCompletion(true).get().status(), equalTo(RestStatus.OK)); } finally { disableIndexBlock(INDEX_NAME, SETTING_BLOCKS_READ); } @@ -137,7 +141,8 @@ public void testRestoreSnapshotWithBlocks() { logger.info("--> restoring a snapshot is blocked when the cluster is read only"); try { setClusterReadOnly(true); - 
assertBlocked(client().admin().cluster().prepareRestoreSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME), MetaData.CLUSTER_READ_ONLY_BLOCK); + assertBlocked(client().admin().cluster().prepareRestoreSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME), + MetaData.CLUSTER_READ_ONLY_BLOCK); } finally { setClusterReadOnly(false); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTests.java index 8c77ccfef90ce..e2a07063d48d5 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTests.java @@ -40,7 +40,8 @@ public void testSerialization() throws Exception { ClusterStateRequest clusterStateRequest = new ClusterStateRequest().routingTable(randomBoolean()).metaData(randomBoolean()) .nodes(randomBoolean()).blocks(randomBoolean()).indices("testindex", "testindex2").indicesOptions(indicesOptions); - Version testVersion = VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.CURRENT); + Version testVersion = VersionUtils.randomVersionBetween(random(), + Version.CURRENT.minimumCompatibilityVersion(), Version.CURRENT); BytesStreamOutput output = new BytesStreamOutput(); output.setVersion(testVersion); clusterStateRequest.writeTo(output); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java index 4bb6a5f3a8c41..bf77cdeebd067 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java @@ -54,7 +54,8 @@ private void assertCounts(ClusterStatsNodes.Counts counts, int total, Map { + searchRequest.source(SearchSourceBuilder.fromXContent(parser, false)); + request.add(searchRequest); + }); + return request; } @Override diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchRequestBuilderTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchRequestBuilderTests.java index eb2f4b6904d90..38f872dea2f4c 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchRequestBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchRequestBuilderTests.java @@ -64,13 +64,16 @@ public void testQueryBuilderQueryToString() { public void testSearchSourceBuilderToString() { SearchRequestBuilder searchRequestBuilder = client.prepareSearch(); searchRequestBuilder.setSource(new SearchSourceBuilder().query(QueryBuilders.termQuery("field", "value"))); - assertThat(searchRequestBuilder.toString(), equalTo(new SearchSourceBuilder().query(QueryBuilders.termQuery("field", "value")).toString())); + assertThat(searchRequestBuilder.toString(), equalTo(new SearchSourceBuilder() + .query(QueryBuilders.termQuery("field", "value")).toString())); } public void testThatToStringDoesntWipeRequestSource() { - SearchRequestBuilder searchRequestBuilder = client.prepareSearch().setSource(new SearchSourceBuilder().query(QueryBuilders.termQuery("field", "value"))); + SearchRequestBuilder searchRequestBuilder = client.prepareSearch() + .setSource(new SearchSourceBuilder().query(QueryBuilders.termQuery("field", "value"))); String preToString = searchRequestBuilder.request().toString(); - 
assertThat(searchRequestBuilder.toString(), equalTo(new SearchSourceBuilder().query(QueryBuilders.termQuery("field", "value")).toString())); + assertThat(searchRequestBuilder.toString(), equalTo(new SearchSourceBuilder() + .query(QueryBuilders.termQuery("field", "value")).toString())); String postToString = searchRequestBuilder.request().toString(); assertThat(preToString, equalTo(postToString)); } diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index c8c40a7f5841a..aeda5f1c3fa80 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -89,7 +89,6 @@ import java.io.IOException; import java.util.Collections; -import java.util.EnumSet; import java.util.HashSet; import java.util.List; import java.util.Locale; @@ -101,7 +100,6 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; -import static java.util.Collections.singleton; import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.state; import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.stateWithActivePrimary; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_WAIT_FOR_ACTIVE_SHARDS; @@ -110,11 +108,9 @@ import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -import static org.hamcrest.core.Is.is; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyInt; import static org.mockito.Matchers.anyLong; @@ -186,157 +182,70 @@ public static void afterClass() { threadPool = null; } - private <T extends Exception> T assertListenerThrows(String msg, PlainActionFuture<?> listener, Class<T> klass) { - ExecutionException exception = expectThrows(ExecutionException.class, msg, listener::get); - assertThat(exception.getCause(), instanceOf(klass)); - @SuppressWarnings("unchecked") - final T cause = (T) exception.getCause(); - return cause; - } - - private void setStateWithBlock(final ClusterService clusterService, final ClusterBlock block, final boolean globalBlock) { - final ClusterBlocks.Builder blocks = ClusterBlocks.builder(); - if (globalBlock) { - blocks.addGlobalBlock(block); - } else { - blocks.addIndexBlock("index", block); + void assertListenerThrows(String msg, PlainActionFuture<?> listener, Class<?> klass) throws InterruptedException { + try { + listener.get(); + fail(msg); + } catch (ExecutionException ex) { + assertThat(ex.getCause(), instanceOf(klass)); } - setState(clusterService, ClusterState.builder(clusterService.state()).blocks(blocks).build()); } - public void testBlocksInReroutePhase() throws Exception { - final ClusterBlock nonRetryableBlock = - new ClusterBlock(1, "non retryable", false, true, false, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL); - final ClusterBlock retryableBlock = - new ClusterBlock(1, "retryable", true, true, false, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL); - - final boolean globalBlock = randomBoolean(); - final TestAction action = new 
TestAction(Settings.EMPTY, "internal:testActionWithBlocks", - transportService, clusterService, shardStateAction, threadPool) { + public void testBlocks() throws ExecutionException, InterruptedException { + Request request = new Request(); + PlainActionFuture<TestResponse> listener = new PlainActionFuture<>(); + ReplicationTask task = maybeTask(); + TestAction action = new TestAction(Settings.EMPTY, "internal:testActionWithBlocks", + transportService, clusterService, shardStateAction, threadPool) { @Override protected ClusterBlockLevel globalBlockLevel() { - return globalBlock ? ClusterBlockLevel.WRITE : null; - } - - @Override - protected ClusterBlockLevel indexBlockLevel() { - return globalBlock == false ? ClusterBlockLevel.WRITE : null; + return ClusterBlockLevel.WRITE; } }; - setState(clusterService, ClusterStateCreationUtils.stateWithActivePrimary("index", true, 0)); - - { - setStateWithBlock(clusterService, nonRetryableBlock, globalBlock); - - Request request = globalBlock ? new Request() : new Request().index("index"); - PlainActionFuture<TestResponse> listener = new PlainActionFuture<>(); - ReplicationTask task = maybeTask(); - - TestAction.ReroutePhase reroutePhase = action.new ReroutePhase(task, request, listener); - reroutePhase.run(); - - ClusterBlockException exception = - assertListenerThrows("primary action should fail operation", listener, ClusterBlockException.class); - assertThat(((ClusterBlockException) exception.unwrapCause()).blocks().iterator().next(), is(nonRetryableBlock)); - assertPhase(task, "failed"); - } - { - setStateWithBlock(clusterService, retryableBlock, globalBlock); - - Request requestWithTimeout = (globalBlock ? new Request() : new Request().index("index")).timeout("5ms"); - PlainActionFuture<TestResponse> listener = new PlainActionFuture<>(); - ReplicationTask task = maybeTask(); - - TestAction.ReroutePhase reroutePhase = action.new ReroutePhase(task, requestWithTimeout, listener); - reroutePhase.run(); - - ClusterBlockException exception = - assertListenerThrows("failed to timeout on retryable block", listener, ClusterBlockException.class); - assertThat(((ClusterBlockException) exception.unwrapCause()).blocks().iterator().next(), is(retryableBlock)); - assertPhase(task, "failed"); - assertTrue(requestWithTimeout.isRetrySet.get()); - } - { - setStateWithBlock(clusterService, retryableBlock, globalBlock); - - Request request = globalBlock ? 
new Request() : new Request().index("index"); - PlainActionFuture<TestResponse> listener = new PlainActionFuture<>(); - ReplicationTask task = maybeTask(); - - TestAction.ReroutePhase reroutePhase = action.new ReroutePhase(task, request, listener); - reroutePhase.run(); - - assertFalse("primary phase should wait on retryable block", listener.isDone()); - assertPhase(task, "waiting_for_retry"); - assertTrue(request.isRetrySet.get()); - - setStateWithBlock(clusterService, nonRetryableBlock, globalBlock); - - ClusterBlockException exception = assertListenerThrows("primary phase should fail operation when moving from a retryable " + - "block to a non-retryable one", listener, ClusterBlockException.class); - assertThat(((ClusterBlockException) exception.unwrapCause()).blocks().iterator().next(), is(nonRetryableBlock)); - assertIndexShardUninitialized(); - } - { - Request requestWithTimeout = new Request().index("unknown").setShardId(new ShardId("unknown", "_na_", 0)).timeout("5ms"); - PlainActionFuture<TestResponse> listener = new PlainActionFuture<>(); - ReplicationTask task = maybeTask(); - - TestAction testActionWithNoBlocks = new TestAction(Settings.EMPTY, "internal:testActionWithNoBlocks", transportService, - clusterService, shardStateAction, threadPool); - listener = new PlainActionFuture<>(); - TestAction.ReroutePhase reroutePhase = testActionWithNoBlocks.new ReroutePhase(task, requestWithTimeout, listener); - reroutePhase.run(); - assertListenerThrows("should fail with an IndexNotFoundException when no blocks", listener, IndexNotFoundException.class); - } - } - - public void testBlocksInPrimaryAction() { - final boolean globalBlock = randomBoolean(); - - final TestAction actionWithBlocks = - new TestAction(Settings.EMPTY, "internal:actionWithBlocks", transportService, clusterService, shardStateAction, threadPool) { - @Override - protected ClusterBlockLevel globalBlockLevel() { - return globalBlock ? ClusterBlockLevel.WRITE : null; - } - - @Override - protected ClusterBlockLevel indexBlockLevel() { - return globalBlock == false ? 
ClusterBlockLevel.WRITE : null; - } - }; - - final String index = "index"; - final ShardId shardId = new ShardId(index, "_na_", 0); - setState(clusterService, stateWithActivePrimary(index, true, randomInt(5))); + ClusterBlocks.Builder block = ClusterBlocks.builder().addGlobalBlock(new ClusterBlock(1, "non retryable", false, true, + false, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); + setState(clusterService, ClusterState.builder(clusterService.state()).blocks(block)); + TestAction.ReroutePhase reroutePhase = action.new ReroutePhase(task, request, listener); + reroutePhase.run(); + assertListenerThrows("primary phase should fail operation", listener, ClusterBlockException.class); + assertPhase(task, "failed"); - final ClusterBlocks.Builder block = ClusterBlocks.builder(); - if (globalBlock) { - block.addGlobalBlock(new ClusterBlock(randomIntBetween(1, 16), "test global block", randomBoolean(), randomBoolean(), - randomBoolean(), RestStatus.BAD_REQUEST, ClusterBlockLevel.ALL)); - } else { - block.addIndexBlock(index, new ClusterBlock(randomIntBetween(1, 16), "test index block", randomBoolean(), randomBoolean(), - randomBoolean(), RestStatus.FORBIDDEN, ClusterBlockLevel.READ_WRITE)); - } + block = ClusterBlocks.builder() + .addGlobalBlock(new ClusterBlock(1, "retryable", true, true, false, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); setState(clusterService, ClusterState.builder(clusterService.state()).blocks(block)); + listener = new PlainActionFuture<>(); + reroutePhase = action.new ReroutePhase(task, new Request().timeout("5ms"), listener); + reroutePhase.run(); + assertListenerThrows("failed to timeout on retryable block", listener, ClusterBlockException.class); + assertPhase(task, "failed"); + assertFalse(request.isRetrySet.get()); - final ClusterState clusterState = clusterService.state(); - final String targetAllocationID = clusterState.getRoutingTable().shardRoutingTable(shardId).primaryShard().allocationId().getId(); - final long primaryTerm = clusterState.metaData().index(index).primaryTerm(shardId.id()); - final Request request = new Request(shardId); - final ReplicationTask task = maybeTask(); - final PlainActionFuture<TestResponse> listener = new PlainActionFuture<>(); + listener = new PlainActionFuture<>(); + reroutePhase = action.new ReroutePhase(task, request = new Request(), listener); + reroutePhase.run(); + assertFalse("primary phase should wait on retryable block", listener.isDone()); + assertPhase(task, "waiting_for_retry"); + assertTrue(request.isRetrySet.get()); - final TransportReplicationAction.AsyncPrimaryAction asyncPrimaryActionWithBlocks = - actionWithBlocks.new AsyncPrimaryAction(request, targetAllocationID, primaryTerm, createTransportChannel(listener), task); - asyncPrimaryActionWithBlocks.run(); + block = ClusterBlocks.builder().addGlobalBlock(new ClusterBlock(1, "non retryable", false, true, false, + RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); + setState(clusterService, ClusterState.builder(clusterService.state()).blocks(block)); + assertListenerThrows("primary phase should fail operation when moving from a retryable block to a non-retryable one", listener, + ClusterBlockException.class); + assertIndexShardUninitialized(); - final ExecutionException exception = expectThrows(ExecutionException.class, listener::get); - assertThat(exception.getCause(), instanceOf(ClusterBlockException.class)); - assertThat(exception.getCause(), hasToString(containsString("test " + (globalBlock ? 
"global" : "index") + " block"))); - assertPhase(task, "finished"); + action = new TestAction(Settings.EMPTY, "internal:testActionWithNoBlocks", transportService, clusterService, shardStateAction, + threadPool) { + @Override + protected ClusterBlockLevel globalBlockLevel() { + return null; + } + }; + listener = new PlainActionFuture<>(); + reroutePhase = action.new ReroutePhase(task, new Request().timeout("5ms"), listener); + reroutePhase.run(); + assertListenerThrows("should fail with an IndexNotFoundException when no blocks checked", listener, IndexNotFoundException.class); } public void assertIndexShardUninitialized() { @@ -468,12 +377,21 @@ public void testClosedIndexOnReroute() throws InterruptedException { PlainActionFuture listener = new PlainActionFuture<>(); ReplicationTask task = maybeTask(); + ClusterBlockLevel indexBlockLevel = randomBoolean() ? ClusterBlockLevel.WRITE : null; TestAction action = new TestAction(Settings.EMPTY, "internal:testActionWithBlocks", transportService, - clusterService, shardStateAction, threadPool); + clusterService, shardStateAction, threadPool) { + @Override + protected ClusterBlockLevel indexBlockLevel() { + return indexBlockLevel; + } + }; TestAction.ReroutePhase reroutePhase = action.new ReroutePhase(task, request, listener); reroutePhase.run(); - assertListenerThrows("must throw index closed exception", listener, IndexClosedException.class); - + if (indexBlockLevel == ClusterBlockLevel.WRITE) { + assertListenerThrows("must throw block exception", listener, ClusterBlockException.class); + } else { + assertListenerThrows("must throw index closed exception", listener, IndexClosedException.class); + } assertPhase(task, "failed"); assertFalse(request.isRetrySet.get()); } @@ -764,12 +682,12 @@ public void testSeqNoIsSetOnPrimary() throws Exception { PlainActionFuture listener = new PlainActionFuture<>(); - final IndexShard shard = mockIndexShard(shardId, clusterService); + final IndexShard shard = mock(IndexShard.class); when(shard.getPendingPrimaryTerm()).thenReturn(primaryTerm); when(shard.routingEntry()).thenReturn(routingEntry); when(shard.isRelocatedPrimary()).thenReturn(false); IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().shardRoutingTable(shardId); - Set inSyncIds = randomBoolean() ? singleton(routingEntry.allocationId().getId()) : + Set inSyncIds = randomBoolean() ? 
Collections.singleton(routingEntry.allocationId().getId()) : clusterService.state().metaData().index(index).inSyncAllocationIds(0); when(shard.getReplicationGroup()).thenReturn( new ReplicationGroup(shardRoutingTable, @@ -1104,17 +1022,6 @@ protected ReplicaResult shardOperationOnReplica(Request request, IndexShard repl transportService.stop(); } - public void testIsRetryableClusterBlockException() { - final TestAction action = new TestAction(Settings.EMPTY, "internal:testIsRetryableClusterBlockException", transportService, - clusterService, shardStateAction, threadPool); - assertFalse(action.isRetryableClusterBlockException(randomRetryPrimaryException(new ShardId("index", "_na_", 0)))); - - final boolean retryable = randomBoolean(); - ClusterBlock randomBlock = new ClusterBlock(randomIntBetween(1, 16), "test", retryable, randomBoolean(), - randomBoolean(), randomFrom(RestStatus.values()), EnumSet.of(randomFrom(ClusterBlockLevel.values()))); - assertEquals(retryable, action.isRetryableClusterBlockException(new ClusterBlockException(singleton(randomBlock)))); - } - private void assertConcreteShardRequest(TransportRequest capturedRequest, Request expectedRequest, AllocationId expectedAllocationId) { final TransportReplicationAction.ConcreteShardRequest<?> concreteShardRequest = (TransportReplicationAction.ConcreteShardRequest<?>) capturedRequest; @@ -1208,6 +1115,15 @@ private class TestAction extends TransportReplicationAction<Request, Request, TestResponse> { + TestAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, + ShardStateAction shardStateAction, ThreadPool threadPool) { + super(settings, actionName, transportService, clusterService, mockIndicesService(clusterService), threadPool, + shardStateAction, new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(), + Request::new, Request::new, ThreadPool.Names.SAME); + } + @Override protected TestResponse newResponseInstance() { return new TestResponse(); @@ -1267,7 +1183,6 @@ final IndexService mockIndexService(final IndexMetaData indexMetaData, ClusterSe private IndexShard mockIndexShard(ShardId shardId, ClusterService clusterService) { final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.shardId()).thenReturn(shardId); doAnswer(invocation -> { ActionListener<Releasable> callback = (ActionListener<Releasable>) invocation.getArguments()[0]; count.incrementAndGet();
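Both flavours of assertListenerThrows in the hunks above lean on PlainActionFuture, which adapts an ActionListener into a blocking Future so an asynchronous onFailure can be asserted synchronously. A rough sketch of the idiom, where doAsyncThing is a hypothetical method accepting an ActionListener<String>:

PlainActionFuture<String> future = PlainActionFuture.newFuture();
doAsyncThing(future); // hypothetical async call; onResponse/onFailure complete the future
// get() rethrows any listener failure wrapped in an ExecutionException
ExecutionException e = expectThrows(ExecutionException.class, future::get);
assertThat(e.getCause(), instanceOf(ClusterBlockException.class));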
diff --git a/server/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTestCase.java b/server/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTestCase.java index 4be46c4fc9e68..0393f5929ce38 100644 --- a/server/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTestCase.java +++ b/server/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTestCase.java @@ -272,8 +272,10 @@ protected TestConfig[] generateTestConfigs(int numberOfTests, TestDoc[] testDocs configs.add(config); } // always adds a test that fails - configs.add(new TestConfig(new TestDoc("doesnt_exist", new TestFieldSetting[]{}, new String[]{}).index("doesn't_exist").alias("doesn't_exist"), - new String[]{"doesnt_exist"}, true, true, true).expectedException(org.elasticsearch.index.IndexNotFoundException.class)); + configs.add(new TestConfig(new TestDoc("doesnt_exist", new TestFieldSetting[]{}, new String[]{}) + .index("doesn't_exist").alias("doesn't_exist"), + new String[]{"doesnt_exist"}, true, true, true) + .expectedException(org.elasticsearch.index.IndexNotFoundException.class)); refresh(); @@ -401,9 +403,10 @@ protected void validateResponse(TermVectorsResponse esResponse, Fields luceneFie } protected TermVectorsRequestBuilder getRequestForConfig(TestConfig config) { - return client().prepareTermVectors(randomBoolean() ? config.doc.index : config.doc.alias, config.doc.type, config.doc.id).setPayloads(config.requestPayloads) .setOffsets(config.requestOffsets).setPositions(config.requestPositions).setFieldStatistics(true).setTermStatistics(true) .setSelectedFields(config.selectedFields).setRealtime(false); + return client().prepareTermVectors(randomBoolean() ? config.doc.index : config.doc.alias, config.doc.type, config.doc.id) + .setPayloads(config.requestPayloads) + .setOffsets(config.requestOffsets).setPositions(config.requestPositions).setFieldStatistics(true).setTermStatistics(true) + .setSelectedFields(config.selectedFields).setRealtime(false); } protected Fields getTermVectorsFromLucene(DirectoryReader directoryReader, TestDoc doc) throws IOException { diff --git a/server/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java b/server/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java index b5a596401cbbc..a45012dc4b3de 100644 --- a/server/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java +++ b/server/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java @@ -765,7 +765,8 @@ private void checkAnalyzedFields(Fields fieldsObject, Set<String> fieldNames, Map<String, String> perFieldAnalyzer) { // check overridden by keyword analyzer ... if (perFieldAnalyzer.containsKey(fieldName)) { TermsEnum iterator = terms.iterator(); - assertThat("Analyzer for " + fieldName + " should have been overridden!", iterator.next().utf8ToString(), equalTo("some text here")); + assertThat("Analyzer for " + fieldName + " should have been overridden!", + iterator.next().utf8ToString(), equalTo("some text here")); assertThat(iterator.next(), nullValue()); } validFields.add(fieldName); diff --git a/server/src/test/java/org/elasticsearch/action/termvectors/MultiTermVectorsIT.java b/server/src/test/java/org/elasticsearch/action/termvectors/MultiTermVectorsIT.java index 2f75f6df1a88e..08751ffe058ec 100644 --- a/server/src/test/java/org/elasticsearch/action/termvectors/MultiTermVectorsIT.java +++ b/server/src/test/java/org/elasticsearch/action/termvectors/MultiTermVectorsIT.java @@ -118,7 +118,8 @@ public void testMultiTermVectorsWithVersion() throws Exception { //Version from Lucene index refresh(); response = client().prepareMultiTermVectors() - .add(new TermVectorsRequest(indexOrAlias(), "type1", "1").selectedFields("field").version(Versions.MATCH_ANY).realtime(false)) + .add(new TermVectorsRequest(indexOrAlias(), "type1", "1").selectedFields("field") + .version(Versions.MATCH_ANY).realtime(false)) .add(new TermVectorsRequest(indexOrAlias(), "type1", "1").selectedFields("field").version(1).realtime(false)) .add(new TermVectorsRequest(indexOrAlias(), "type1", "1").selectedFields("field").version(2).realtime(false)) .get(); diff --git a/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java b/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java index 216c1802956e8..9a8bb38d8cd2d 100644 --- a/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java +++ b/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java @@ -269,13 +269,14 @@ public void testFieldTypeToTermVectorString() throws Exception { String ftOpts = FieldMapper.termVectorOptionsToString(ft); assertThat("with_positions_payloads", equalTo(ftOpts)); TextFieldMapper.Builder builder = new TextFieldMapper.Builder(null); - boolean exceptiontrown = false; + boolean exceptionThrown = false; try { TypeParsers.parseTermVector("", 
ftOpts, builder); } catch (MapperParsingException e) { - exceptiontrown = true; + exceptionThrown = true; } - assertThat("TypeParsers.parseTermVector should accept string with_positions_payloads but does not.", exceptiontrown, equalTo(false)); + assertThat("TypeParsers.parseTermVector should accept string with_positions_payloads but does not.", + exceptionThrown, equalTo(false)); } public void testTermVectorStringGenerationWithoutPositions() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java index 989b84f9f1a6b..d854b796f0cd8 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java @@ -128,19 +128,22 @@ public void testClusterStateUpdateLogging() throws Exception { clusterApplierService.getClass().getCanonicalName(), Level.TRACE, "*failed to execute cluster state applier in [2s]*")); + mockAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "test3", + clusterApplierService.getClass().getCanonicalName(), + Level.DEBUG, + "*processing [test3]: took [0s] no change in cluster state*")); Logger clusterLogger = LogManager.getLogger("org.elasticsearch.cluster.service"); Loggers.addAppender(clusterLogger, mockAppender); try { - final CountDownLatch latch = new CountDownLatch(3); clusterApplierService.currentTimeOverride = System.nanoTime(); clusterApplierService.runOnApplierThread("test1", currentState -> clusterApplierService.currentTimeOverride += TimeValue.timeValueSeconds(1).nanos(), new ClusterApplyListener() { @Override - public void onSuccess(String source) { - latch.countDown(); - } + public void onSuccess(String source) { } @Override public void onFailure(String source, Exception e) { @@ -159,31 +162,25 @@ public void onSuccess(String source) { } @Override - public void onFailure(String source, Exception e) { - latch.countDown(); - } + public void onFailure(String source, Exception e) { } }); // Additional update task to make sure all previous logging made it to the loggerName - // We don't check logging for this on since there is no guarantee that it will occur before our check clusterApplierService.runOnApplierThread("test3", currentState -> {}, new ClusterApplyListener() { @Override - public void onSuccess(String source) { - latch.countDown(); - } + public void onSuccess(String source) { } @Override public void onFailure(String source, Exception e) { fail(); } }); - latch.await(); + assertBusy(mockAppender::assertAllExpectationsMatched); } finally { Loggers.removeAppender(clusterLogger, mockAppender); mockAppender.stop(); } - mockAppender.assertAllExpectationsMatched(); } @TestLogging("org.elasticsearch.cluster.service:WARN") // To ensure that we log cluster state events on WARN level diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/RunOnceTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/RunOnceTests.java new file mode 100644 index 0000000000000..e833edc9d56b3 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/RunOnceTests.java @@ -0,0 +1,101 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.util.concurrent; + +import org.elasticsearch.test.ESTestCase; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicInteger; + +public class RunOnceTests extends ESTestCase { + + public void testRunOnce() { + final AtomicInteger counter = new AtomicInteger(0); + final RunOnce runOnce = new RunOnce(counter::incrementAndGet); + assertFalse(runOnce.hasRun()); + + runOnce.run(); + assertTrue(runOnce.hasRun()); + assertEquals(1, counter.get()); + + runOnce.run(); + assertTrue(runOnce.hasRun()); + assertEquals(1, counter.get()); + } + + public void testRunOnceConcurrently() throws InterruptedException { + final AtomicInteger counter = new AtomicInteger(0); + final RunOnce runOnce = new RunOnce(counter::incrementAndGet); + + final Thread[] threads = new Thread[between(3, 10)]; + final CountDownLatch latch = new CountDownLatch(1); + for (int i = 0; i < threads.length; i++) { + threads[i] = new Thread(() -> { + try { + latch.await(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + runOnce.run(); + }); + threads[i].start(); + } + + latch.countDown(); + for (Thread thread : threads) { + thread.join(); + } + assertTrue(runOnce.hasRun()); + assertEquals(1, counter.get()); + } + + public void testRunOnceWithAbstractRunnable() { + final AtomicInteger onRun = new AtomicInteger(0); + final AtomicInteger onFailure = new AtomicInteger(0); + final AtomicInteger onAfter = new AtomicInteger(0); + + final RunOnce runOnce = new RunOnce(new AbstractRunnable() { + @Override + protected void doRun() throws Exception { + onRun.incrementAndGet(); + throw new RuntimeException("failure"); + } + + @Override + public void onFailure(Exception e) { + onFailure.incrementAndGet(); + } + + @Override + public void onAfter() { + onAfter.incrementAndGet(); + } + }); + + final int iterations = randomIntBetween(1, 10); + for (int i = 0; i < iterations; i++) { + runOnce.run(); + assertEquals(1, onRun.get()); + assertEquals(1, onFailure.get()); + assertEquals(1, onAfter.get()); + assertTrue(runOnce.hasRun()); + } + } +}
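For context on what RunOnceTests pins down: RunOnce wraps a Runnable so the delegate executes at most once even under concurrent run() calls, and exposes hasRun(). A simplified sketch of such a wrapper (illustrative only, not the actual org.elasticsearch.common.util.concurrent.RunOnce source):

import java.util.concurrent.atomic.AtomicBoolean;

final class RunOnceSketch implements Runnable {
    private final Runnable delegate;
    private final AtomicBoolean hasRun = new AtomicBoolean(false);

    RunOnceSketch(Runnable delegate) {
        this.delegate = delegate;
    }

    @Override
    public void run() {
        // compareAndSet guarantees exactly one winning thread reaches the delegate
        if (hasRun.compareAndSet(false, true)) {
            delegate.run();
        }
    }

    boolean hasRun() {
        return hasRun.get();
    }
}

Because the whole runnable sits behind the guard, an AbstractRunnable delegate has its doRun, onFailure and onAfter callbacks invoked a single time as well, which is exactly what testRunOnceWithAbstractRunnable asserts.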
diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java b/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java index ed310ee305acf..1c0329a51e32a 100644 --- a/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.discovery.zen; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.Constants; import org.elasticsearch.Version; @@ -810,6 +811,8 @@ private static class NetworkHandle { private static class TestUnicastZenPing extends UnicastZenPing { + private static final Logger logger = LogManager.getLogger(TestUnicastZenPing.class); + TestUnicastZenPing(Settings settings, ThreadPool threadPool, NetworkHandle networkHandle, PingContextProvider contextProvider) { super(Settings.builder().put("node.name", networkHandle.node.getName()).put(settings).build(), diff --git a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java index ee56d07ca6594..296fd973a5bf7 100644 --- a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java @@ -1387,6 +1387,44 @@ public void testCrossFields() throws Exception { } } + public void testPhraseSlop() throws Exception { + Query query = new QueryStringQueryBuilder("quick fox") + .field(STRING_FIELD_NAME) + .type(MultiMatchQueryBuilder.Type.PHRASE) + .toQuery(createShardContext()); + + PhraseQuery expected = new PhraseQuery.Builder() + .add(new Term(STRING_FIELD_NAME, "quick")) + .add(new Term(STRING_FIELD_NAME, "fox")) + .build(); + assertEquals(expected, query); + + query = new QueryStringQueryBuilder("quick fox") + .field(STRING_FIELD_NAME) + .type(MultiMatchQueryBuilder.Type.PHRASE) + .phraseSlop(2) + .toQuery(createShardContext()); + + expected = new PhraseQuery.Builder() + .add(new Term(STRING_FIELD_NAME, "quick")) + .add(new Term(STRING_FIELD_NAME, "fox")) + .setSlop(2) + .build(); + assertEquals(expected, query); + + query = new QueryStringQueryBuilder("\"quick fox\"") + .field(STRING_FIELD_NAME) + .phraseSlop(2) + .toQuery(createShardContext()); + assertEquals(expected, query); + + query = new QueryStringQueryBuilder("\"quick fox\"~2") + .field(STRING_FIELD_NAME) + .phraseSlop(10) + .toQuery(createShardContext()); + assertEquals(expected, query); + } + private static IndexMetaData newIndexMeta(String name, Settings oldIndexSettings, Settings indexSettings) { Settings build = Settings.builder().put(oldIndexSettings) .put(indexSettings) diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java index 76918a916124c..d84bef7ed330a 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java @@ -19,6 +19,8 @@ package org.elasticsearch.indices.cluster; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; @@ -67,7 +69,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; @@ -105,11 +106,12 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -public class ClusterStateChanges extends AbstractComponent { +public class ClusterStateChanges { private static final Settings SETTINGS = Settings.builder() .put(PATH_HOME_SETTING.getKey(), "dummy") .build(); + private static final Logger logger = LogManager.getLogger(ClusterStateChanges.class); private final AllocationService allocationService; private final ClusterService 
clusterService; private final ShardStateAction.ShardFailedClusterStateTaskExecutor shardFailedClusterStateTaskExecutor; diff --git a/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java b/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java index 085a1cc264dd2..0faf226ff0500 100644 --- a/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java +++ b/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java @@ -19,6 +19,8 @@ package org.elasticsearch.persistent; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionListener; @@ -291,6 +293,8 @@ public int hashCode() { public static class TestPersistentTasksExecutor extends PersistentTasksExecutor<TestParams> { + private static final Logger logger = LogManager.getLogger(TestPersistentTasksExecutor.class); + public static final String NAME = "cluster:admin/persistent/test"; private final ClusterService clusterService; diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryActionTests.java index da31e3ad13e6e..0d2e4723a7352 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryActionTests.java @@ -27,7 +27,10 @@ import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.search.AbstractSearchTestCase; @@ -56,7 +59,8 @@ public class RestValidateQueryActionTests extends AbstractSearchTestCase { private static NodeClient client = new NodeClient(Settings.EMPTY, threadPool); private static UsageService usageService = new UsageService(); - private static RestController controller = new RestController(emptySet(), null, client, null, usageService); + private static RestController controller = new RestController(emptySet(), null, client, + new NoneCircuitBreakerService(), usageService); private static RestValidateQueryAction action = new RestValidateQueryAction(Settings.EMPTY, controller); /** @@ -148,4 +152,33 @@ private RestRequest createRestRequest(String content) { .withContent(new BytesArray(content), XContentType.JSON) .build(); } + + public void testTypeInPath() { + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()) + .withMethod(RestRequest.Method.GET) + .withPath("/some_index/some_type/_validate/query") + .build(); + + performRequest(request); + assertWarnings(RestValidateQueryAction.TYPES_DEPRECATION_MESSAGE); + } + + public void testTypeParameter() { + Map<String, String> params = new HashMap<>(); + params.put("type", "some_type"); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()) + .withMethod(RestRequest.Method.GET) + .withPath("_validate/query") + .withParams(params) + .build(); + + performRequest(request); + assertWarnings(RestValidateQueryAction.TYPES_DEPRECATION_MESSAGE); + }
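The testTypeInPath/testTypeParameter pair here, and the three new test classes that follow, all use the same harness: build a RestController with a real NoneCircuitBreakerService (instead of the previous null, which the dispatch path evidently no longer tolerates), register the action, dispatch a FakeRestRequest, and assert the deprecation warning. A condensed sketch, with RestSomeAction standing in for whichever action is under test:

RestController controller = new RestController(Collections.emptySet(), null,
    mock(NodeClient.class), new NoneCircuitBreakerService(), new UsageService());
new RestSomeAction(Settings.EMPTY, controller); // hypothetical handler; registers itself with the controller

RestRequest request = new FakeRestRequest.Builder(xContentRegistry())
    .withMethod(RestRequest.Method.GET)
    .withPath("/some_index/some_type/_endpoint") // deprecated type-in-path form
    .build();
controller.dispatchRequest(request, new FakeRestChannel(request, false, 1), new ThreadContext(Settings.EMPTY));
assertWarnings(RestSomeAction.TYPES_DEPRECATION_MESSAGE);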
+ + private void performRequest(RestRequest request) { + RestChannel channel = new FakeRestChannel(request, false, 1); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + controller.dispatchRequest(request, channel, threadContext); + } } diff --git a/server/src/test/java/org/elasticsearch/rest/action/search/RestCountActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/search/RestCountActionTests.java new file mode 100644 index 0000000000000..a6404332772ba --- /dev/null +++ b/server/src/test/java/org/elasticsearch/rest/action/search/RestCountActionTests.java @@ -0,0 +1,82 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.rest.action.search; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestRequest.Method; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestChannel; +import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.usage.UsageService; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static org.mockito.Mockito.mock; + +public class RestCountActionTests extends ESTestCase { + private RestController controller; + + public void setUp() throws Exception { + super.setUp(); + controller = new RestController(Collections.emptySet(), null, + mock(NodeClient.class), + new NoneCircuitBreakerService(), + new UsageService()); + new RestCountAction(Settings.EMPTY, controller); + } + + public void testTypeInPath() { + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()) + .withMethod(Method.POST) + .withPath("/some_index/some_type/_count") + .build(); + + performRequest(request); + assertWarnings(RestCountAction.TYPES_DEPRECATION_MESSAGE); + } + + public void testTypeParameter() { + Map<String, String> params = new HashMap<>(); + params.put("type", "some_type"); + + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()) + .withMethod(Method.GET) + .withPath("/some_index/_count") + .withParams(params) + .build(); + + performRequest(request); + assertWarnings(RestCountAction.TYPES_DEPRECATION_MESSAGE); + } + + private void performRequest(RestRequest request) { + RestChannel channel = new FakeRestChannel(request, false, 1); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + controller.dispatchRequest(request, channel, threadContext); + } +} diff --git 
a/server/src/test/java/org/elasticsearch/rest/action/search/RestMultiSearchActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/search/RestMultiSearchActionTests.java new file mode 100644 index 0000000000000..7469758273bc8 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/rest/action/search/RestMultiSearchActionTests.java @@ -0,0 +1,86 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.rest.action.search; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestChannel; +import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.usage.UsageService; + +import java.nio.charset.StandardCharsets; +import java.util.Collections; + +import static org.mockito.Mockito.mock; + +public class RestMultiSearchActionTests extends ESTestCase { + private RestController controller; + + public void setUp() throws Exception { + super.setUp(); + controller = new RestController(Collections.emptySet(), null, + mock(NodeClient.class), + new NoneCircuitBreakerService(), + new UsageService()); + new RestMultiSearchAction(Settings.EMPTY, controller); + } + + public void testTypeInPath() { + String content = "{ \"index\": \"some_index\" } \n {} \n"; + BytesArray bytesContent = new BytesArray(content.getBytes(StandardCharsets.UTF_8)); + + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()) + .withMethod(RestRequest.Method.GET) + .withPath("/some_index/some_type/_msearch") + .withContent(bytesContent, XContentType.JSON) + .build(); + + performRequest(request); + assertWarnings(RestMultiSearchAction.TYPES_DEPRECATION_MESSAGE); + } + + public void testTypeInBody() { + String content = "{ \"index\": \"some_index\", \"type\": \"some_type\" } \n {} \n"; + BytesArray bytesContent = new BytesArray(content.getBytes(StandardCharsets.UTF_8)); + + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()) + .withMethod(RestRequest.Method.POST) + .withPath("/some_index/_msearch") + .withContent(bytesContent, XContentType.JSON) + .build(); + + performRequest(request); + assertWarnings(RestMultiSearchAction.TYPES_DEPRECATION_MESSAGE); + } + + private void performRequest(RestRequest request) { + RestChannel channel = new FakeRestChannel(request, false, 1); + ThreadContext threadContext = new 
ThreadContext(Settings.EMPTY); + controller.dispatchRequest(request, channel, threadContext); + } +} diff --git a/server/src/test/java/org/elasticsearch/rest/action/search/RestSearchActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/search/RestSearchActionTests.java new file mode 100644 index 0000000000000..e738fa5c9dc3c --- /dev/null +++ b/server/src/test/java/org/elasticsearch/rest/action/search/RestSearchActionTests.java @@ -0,0 +1,81 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.rest.action.search; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestChannel; +import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.usage.UsageService; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static org.mockito.Mockito.mock; + +public class RestSearchActionTests extends ESTestCase { + private RestController controller; + + public void setUp() throws Exception { + super.setUp(); + controller = new RestController(Collections.emptySet(), null, + mock(NodeClient.class), + new NoneCircuitBreakerService(), + new UsageService()); + new RestSearchAction(Settings.EMPTY, controller); + } + + public void testTypeInPath() { + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()) + .withMethod(RestRequest.Method.GET) + .withPath("/some_index/some_type/_search") + .build(); + + performRequest(request); + assertWarnings(RestSearchAction.TYPES_DEPRECATION_MESSAGE); + } + + public void testTypeParameter() { + Map<String, String> params = new HashMap<>(); + params.put("type", "some_type"); + + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()) + .withMethod(RestRequest.Method.GET) + .withPath("/some_index/_search") + .withParams(params) + .build(); + + performRequest(request); + assertWarnings(RestSearchAction.TYPES_DEPRECATION_MESSAGE); + } + + private void performRequest(RestRequest request) { + RestChannel channel = new FakeRestChannel(request, false, 1); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + controller.dispatchRequest(request, channel, threadContext); + } +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/SingleDimensionValuesSourceTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/SingleDimensionValuesSourceTests.java index 
69189d4e713a7..e7703265d7b2d 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/SingleDimensionValuesSourceTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/SingleDimensionValuesSourceTests.java @@ -22,6 +22,8 @@ import org.apache.lucene.document.LongPoint; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; +import org.apache.lucene.search.BoostQuery; +import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.TermQuery; import org.elasticsearch.common.util.BigArrays; @@ -175,6 +177,10 @@ public void testNumericSorted() { assertNotNull(source.createSortedDocsProducerOrNull(reader, new MatchAllDocsQuery())); assertNotNull(source.createSortedDocsProducerOrNull(reader, null)); assertNotNull(source.createSortedDocsProducerOrNull(reader, LongPoint.newRangeQuery("number", 0, 1))); + assertNotNull(source.createSortedDocsProducerOrNull(reader, new IndexOrDocValuesQuery( + LongPoint.newRangeQuery("number", 0, 1), new MatchAllDocsQuery()))); + assertNotNull(source.createSortedDocsProducerOrNull(reader, new BoostQuery(new IndexOrDocValuesQuery( + LongPoint.newRangeQuery("number", 0, 1), new MatchAllDocsQuery()), 2.0f))); assertNull(source.createSortedDocsProducerOrNull(reader, new TermQuery(new Term("keyword", "toto)")))); LongValuesSource sourceWithMissing = new LongValuesSource( diff --git a/server/src/test/java/org/elasticsearch/search/profile/SearchProfileShardResultsTests.java b/server/src/test/java/org/elasticsearch/search/profile/SearchProfileShardResultsTests.java index 36841c08c9048..2874f85f49ef3 100644 --- a/server/src/test/java/org/elasticsearch/search/profile/SearchProfileShardResultsTests.java +++ b/server/src/test/java/org/elasticsearch/search/profile/SearchProfileShardResultsTests.java @@ -87,9 +87,9 @@ private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws } SearchProfileShardResults parsed; try (XContentParser parser = createParser(xContentType.xContent(), mutated)) { - ensureExpectedToken(parser.nextToken(), XContentParser.Token.START_OBJECT, parser::getTokenLocation); + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); ensureFieldName(parser, parser.nextToken(), SearchProfileShardResults.PROFILE_FIELD); - ensureExpectedToken(parser.nextToken(), XContentParser.Token.START_OBJECT, parser::getTokenLocation); + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); parsed = SearchProfileShardResults.fromXContent(parser); assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); diff --git a/server/src/test/java/org/elasticsearch/search/profile/aggregation/AggregationProfileShardResultTests.java b/server/src/test/java/org/elasticsearch/search/profile/aggregation/AggregationProfileShardResultTests.java index a21eeedb962f7..b67caeab922d2 100644 --- a/server/src/test/java/org/elasticsearch/search/profile/aggregation/AggregationProfileShardResultTests.java +++ b/server/src/test/java/org/elasticsearch/search/profile/aggregation/AggregationProfileShardResultTests.java @@ -57,9 +57,9 @@ public void testFromXContent() throws IOException { AggregationProfileShardResult parsed; try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) { - 
XContentParserUtils.ensureExpectedToken(parser.nextToken(), XContentParser.Token.START_OBJECT, parser::getTokenLocation); + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); XContentParserUtils.ensureFieldName(parser, parser.nextToken(), AggregationProfileShardResult.AGGREGATIONS); - XContentParserUtils.ensureExpectedToken(parser.nextToken(), XContentParser.Token.START_ARRAY, parser::getTokenLocation); + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_ARRAY, parser.nextToken(), parser::getTokenLocation); parsed = AggregationProfileShardResult.fromXContent(parser); assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); assertNull(parser.nextToken()); diff --git a/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfileShardResultTests.java b/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfileShardResultTests.java index 6ffece354fdf0..103f8eb61dec0 100644 --- a/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfileShardResultTests.java +++ b/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfileShardResultTests.java @@ -59,7 +59,7 @@ public void testFromXContent() throws IOException { QueryProfileShardResult parsed; try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) { - XContentParserUtils.ensureExpectedToken(parser.nextToken(), XContentParser.Token.START_OBJECT, parser::getTokenLocation); + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); parsed = QueryProfileShardResult.fromXContent(parser); assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); assertNull(parser.nextToken()); diff --git a/server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestionOptionTests.java b/server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestionOptionTests.java index df4c6898cc7d6..dc1ac3b3751dc 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestionOptionTests.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestionOptionTests.java @@ -48,10 +48,10 @@ public static Option createTestItem() { Text text = new Text(randomAlphaOfLengthBetween(5, 15)); int docId = randomInt(); int numberOfContexts = randomIntBetween(0, 3); - Map<String, Set<CharSequence>> contexts = new HashMap<>(); + Map<String, Set<String>> contexts = new HashMap<>(); for (int i = 0; i < numberOfContexts; i++) { int numberOfValues = randomIntBetween(0, 3); - Set<CharSequence> values = new HashSet<>(); + Set<String> values = new HashSet<>(); for (int v = 0; v < numberOfValues; v++) { values.add(randomAlphaOfLengthBetween(5, 15)); } @@ -106,7 +106,7 @@ private void doTestFromXContent(boolean addRandomFields) throws IOException { } public void testToXContent() throws IOException { - Map<String, Set<CharSequence>> contexts = Collections.singletonMap("key", Collections.singleton("value")); + Map<String, Set<String>> contexts = Collections.singletonMap("key", Collections.singleton("value")); CompletionSuggestion.Entry.Option option = new CompletionSuggestion.Entry.Option(1, new Text("someText"), 1.3f, contexts); BytesReference xContent = toXContent(option, XContentType.JSON, randomBoolean()); assertEquals("{\"text\":\"someText\",\"score\":1.3,\"contexts\":{\"key\":[\"value\"]}}" diff --git a/server/src/test/java/org/elasticsearch/search/suggest/SuggestionTests.java b/server/src/test/java/org/elasticsearch/search/suggest/SuggestionTests.java index 55c8048baf6bd..e05b42c102a75 100644
--- a/server/src/test/java/org/elasticsearch/search/suggest/SuggestionTests.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/SuggestionTests.java @@ -249,7 +249,7 @@ public void testToXContent() throws IOException { + "}", xContent.utf8ToString()); } { - Map<String, Set<CharSequence>> contexts = Collections.singletonMap("key", Collections.singleton("value")); + Map<String, Set<String>> contexts = Collections.singletonMap("key", Collections.singleton("value")); CompletionSuggestion.Entry.Option option = new CompletionSuggestion.Entry.Option(1, new Text("someText"), 1.3f, contexts); CompletionSuggestion.Entry entry = new CompletionSuggestion.Entry(new Text("entryText"), 42, 313); entry.addOption(option); diff --git a/server/src/test/java/org/elasticsearch/search/suggest/completion/CategoryContextMappingTests.java b/server/src/test/java/org/elasticsearch/search/suggest/completion/CategoryContextMappingTests.java index f9b252f0e136b..b79fa790fdd17 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/completion/CategoryContextMappingTests.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/completion/CategoryContextMappingTests.java @@ -724,7 +724,7 @@ public void testParsingContextFromDocument() throws Exception { document.add(new Field(keyword.name(), new BytesRef("category1"), keyword)); // Ignore doc values document.add(new SortedSetDocValuesField(keyword.name(), new BytesRef("category1"))); - Set<CharSequence> context = mapping.parseContext(document); + Set<String> context = mapping.parseContext(document); assertThat(context.size(), equalTo(1)); assertTrue(context.contains("category1")); diff --git a/server/src/test/resources/org/elasticsearch/action/search/simple-msearch5.json b/server/src/test/resources/org/elasticsearch/action/search/simple-msearch5.json index f2cc4e0e96519..ac3fb4b62d693 100644 --- a/server/src/test/resources/org/elasticsearch/action/search/simple-msearch5.json +++ b/server/src/test/resources/org/elasticsearch/action/search/simple-msearch5.json @@ -1,6 +1,6 @@ {"index":["test0", "test1"], "request_cache": true} {"query" : {"match_all" : {}}} -{"index" : "test2,test3", "type" : "type1", "preference": "_local"} +{"index" : "test2,test3", "preference": "_local"} {"query" : {"match_all" : {}}} -{"index" : ["test4", "test1"], "type" : [ "type2", "type1" ], "routing": "123"} +{"index" : ["test4", "test1"], "routing": "123"} {"query" : {"match_all" : {}}} \ No newline at end of file diff --git a/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java b/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java index 895bd7ec77a2b..ccb010e2a915e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java +++ b/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java @@ -112,7 +112,7 @@ public UnseenEventExpectation(String name, String logger, Level level, String me @Override public void assertMatched() { - assertThat(name, saw, equalTo(false)); + assertThat("expected not to see " + name + " but did", saw, equalTo(false)); } } @@ -124,7 +124,7 @@ public SeenEventExpectation(String name, String logger, Level level, String mess @Override public void assertMatched() { - assertThat(name, saw, equalTo(true)); + assertThat("expected to see " + name + " but did not", saw, equalTo(true)); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/discovery/MockZenPing.java b/test/framework/src/main/java/org/elasticsearch/test/discovery/MockZenPing.java index 37db06a15e6a9..142e8b19a5ce3 100644 ---
a/test/framework/src/main/java/org/elasticsearch/test/discovery/MockZenPing.java +++ b/test/framework/src/main/java/org/elasticsearch/test/discovery/MockZenPing.java @@ -18,9 +18,10 @@ */ package org.elasticsearch.test.discovery; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.discovery.zen.PingContextProvider; @@ -35,7 +36,9 @@ * A {@link ZenPing} implementation which returns results based on a static in-memory map. This allows pinging * to be immediate and can be used to speed up tests. */ -public final class MockZenPing extends AbstractComponent implements ZenPing { +public final class MockZenPing implements ZenPing { + + private static final Logger logger = LogManager.getLogger(MockZenPing.class); static final Map<ClusterName, Set<MockZenPing>> activeNodesPerCluster = new HashMap<>(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java b/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java index 0133a8be0c0bd..ede68a3a721b0 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java +++ b/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java @@ -19,6 +19,8 @@ package org.elasticsearch.test.tasks; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.common.settings.Setting; @@ -38,6 +40,8 @@ */ public class MockTaskManager extends TaskManager { + private static final Logger logger = LogManager.getLogger(MockTaskManager.class); + public static final Setting<Boolean> USE_MOCK_TASK_MANAGER_SETTING = Setting.boolSetting("tests.mock.taskmanager.enabled", false, Property.NodeScope); diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index 3fc4d030da046..3a86ed30dc5d8 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -20,7 +20,6 @@ package org.elasticsearch.test.transport; import com.carrotsearch.randomizedtesting.SysGlobals; -import java.util.concurrent.TimeUnit; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterModule; @@ -38,6 +37,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.RunOnce; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.node.Node; import org.elasticsearch.plugins.Plugin; @@ -67,7 +67,7 @@ import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.LinkedBlockingDeque; -import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.TimeUnit; import java.util.function.Function; import java.util.function.Supplier; @@ -349,9 +349,7 @@ public void sendRequest(Transport.Connection
connection, long requestId, String request.writeTo(bStream); final TransportRequest clonedRequest = reg.newRequest(bStream.bytes().streamInput()); - Runnable runnable = new AbstractRunnable() { - AtomicBoolean requestSent = new AtomicBoolean(); - + final RunOnce runnable = new RunOnce(new AbstractRunnable() { @Override public void onFailure(Exception e) { logger.debug("failed to send delayed request", e); @@ -359,11 +357,9 @@ public void onFailure(Exception e) { @Override protected void doRun() throws IOException { - if (requestSent.compareAndSet(false, true)) { - connection.sendRequest(requestId, action, clonedRequest, options); - } + connection.sendRequest(requestId, action, clonedRequest, options); } - }; + }); // store the request to send it once the rule is cleared. synchronized (this) { diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java index 797c08cc973ee..bd22b85684ca4 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java @@ -5,6 +5,8 @@ */ package org.elasticsearch.xpack.ccr.action; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; @@ -62,6 +64,8 @@ public class ShardFollowTasksExecutor extends PersistentTasksExecutor { + private static final Logger logger = LogManager.getLogger(ShardFollowTasksExecutor.class); + private final Client client; private final ThreadPool threadPool; private final ClusterService clusterService; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java index b51a451a67faa..a9fb0edee3389 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java @@ -13,10 +13,8 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.protocol.xpack.license.LicenseStatus; import java.io.IOException; @@ -24,7 +22,6 @@ import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; @@ -33,7 +30,6 @@ import java.util.stream.Collectors; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; public class XPackInfoResponse extends ActionResponse implements ToXContentObject { /** @@ -111,36 +107,6 @@ public String toString() { return Strings.toString(this, true, false); } - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - 
"xpack_info_response", true, (a, v) -> { - BuildInfo buildInfo = (BuildInfo) a[0]; - LicenseInfo licenseInfo = (LicenseInfo) a[1]; - @SuppressWarnings("unchecked") // This is how constructing object parser works - List featureSets = (List) a[2]; - FeatureSetsInfo featureSetsInfo = featureSets == null ? null : new FeatureSetsInfo(new HashSet<>(featureSets)); - return new XPackInfoResponse(buildInfo, licenseInfo, featureSetsInfo); - }); - static { - PARSER.declareObject(optionalConstructorArg(), BuildInfo.PARSER, new ParseField("build")); - /* - * licenseInfo is sort of "double optional" because it is - * optional but it can also be send as `null`. - */ - PARSER.declareField(optionalConstructorArg(), (p, v) -> { - if (p.currentToken() == XContentParser.Token.VALUE_NULL) { - return null; - } - return LicenseInfo.PARSER.parse(p, v); - }, - new ParseField("license"), ValueType.OBJECT_OR_NULL); - PARSER.declareNamedObjects(optionalConstructorArg(), - (p, c, name) -> FeatureSetsInfo.FeatureSet.PARSER.parse(p, name), - new ParseField("features")); - } - public static XPackInfoResponse fromXContent(XContentParser parser) throws IOException { - return PARSER.parse(parser, null); - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -236,24 +202,6 @@ public int hashCode() { return Objects.hash(uid, type, mode, status, expiryDate); } - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "license_info", true, (a, v) -> { - String uid = (String) a[0]; - String type = (String) a[1]; - String mode = (String) a[2]; - LicenseStatus status = LicenseStatus.fromString((String) a[3]); - Long expiryDate = (Long) a[4]; - long primitiveExpiryDate = expiryDate == null ? 
BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS : expiryDate; - return new LicenseInfo(uid, type, mode, status, primitiveExpiryDate); - }); - static { - PARSER.declareString(constructorArg(), new ParseField("uid")); - PARSER.declareString(constructorArg(), new ParseField("type")); - PARSER.declareString(constructorArg(), new ParseField("mode")); - PARSER.declareString(constructorArg(), new ParseField("status")); - PARSER.declareLong(optionalConstructorArg(), new ParseField("expiry_date_in_millis")); - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject() @@ -449,22 +397,6 @@ public int hashCode() { return Objects.hash(name, description, available, enabled, nativeCodeInfo); } - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "feature_set", true, (a, name) -> { - String description = (String) a[0]; - boolean available = (Boolean) a[1]; - boolean enabled = (Boolean) a[2]; - @SuppressWarnings("unchecked") // Matches up with declaration below - Map nativeCodeInfo = (Map) a[3]; - return new FeatureSet(name, description, available, enabled, nativeCodeInfo); - }); - static { - PARSER.declareString(optionalConstructorArg(), new ParseField("description")); - PARSER.declareBoolean(constructorArg(), new ParseField("available")); - PARSER.declareBoolean(constructorArg(), new ParseField("enabled")); - PARSER.declareObject(optionalConstructorArg(), (p, name) -> p.map(), new ParseField("native_code_info")); - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/CopyExecutionStateStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/CopyExecutionStateStep.java index b8192dd7e43be..bce5431ac074d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/CopyExecutionStateStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/CopyExecutionStateStep.java @@ -22,7 +22,7 @@ * new index has been created. Useful for actions such as shrink. 
*/ public class CopyExecutionStateStep extends ClusterStateActionStep { - public static final String NAME = "copy_execution_state"; + public static final String NAME = "copy-execution-state"; private static final Logger logger = LogManager.getLogger(CopyExecutionStateStep.class); @@ -52,6 +52,13 @@ public ClusterState performAction(Index index, ClusterState clusterState) { String targetIndexName = shrunkIndexPrefix + indexName; IndexMetaData targetIndexMetaData = clusterState.metaData().index(targetIndexName); + if (targetIndexMetaData == null) { + logger.warn("[{}] index [{}] unable to copy execution state to target index [{}] as target index does not exist", + getKey().getAction(), index.getName(), targetIndexName); + throw new IllegalStateException("unable to copy execution state from [" + index.getName() + + "] to [" + targetIndexName + "] as target index does not exist"); + } + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(indexMetaData); String phase = lifecycleState.getPhase(); String action = lifecycleState.getAction(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverAction.java index 78dce2db1b8c2..d6b762966a944 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverAction.java @@ -132,18 +132,23 @@ public boolean isSafeAction() { @Override public List toSteps(Client client, String phase, Step.StepKey nextStepKey) { + StepKey waitForRolloverReadyStepKey = new StepKey(phase, NAME, WaitForRolloverReadyStep.NAME); + StepKey rolloverStepKey = new StepKey(phase, NAME, RolloverStep.NAME); StepKey updateDateStepKey = new StepKey(phase, NAME, UpdateRolloverLifecycleDateStep.NAME); - RolloverStep rolloverStep = new RolloverStep(new StepKey(phase, NAME, RolloverStep.NAME), updateDateStepKey, client, - maxSize, maxAge, maxDocs); + + WaitForRolloverReadyStep waitForRolloverReadyStep = new WaitForRolloverReadyStep(waitForRolloverReadyStepKey, rolloverStepKey, + client, maxSize, maxAge, maxDocs); + RolloverStep rolloverStep = new RolloverStep(rolloverStepKey, updateDateStepKey, client); UpdateRolloverLifecycleDateStep updateDateStep = new UpdateRolloverLifecycleDateStep(updateDateStepKey, nextStepKey); - return Arrays.asList(rolloverStep, updateDateStep); + return Arrays.asList(waitForRolloverReadyStep, rolloverStep, updateDateStep); } @Override public List toStepKeys(String phase) { + StepKey rolloverReadyStepKey = new StepKey(phase, NAME, WaitForRolloverReadyStep.NAME); StepKey rolloverStepKey = new StepKey(phase, NAME, RolloverStep.NAME); StepKey updateDateStepKey = new StepKey(phase, NAME, UpdateRolloverLifecycleDateStep.NAME); - return Arrays.asList(rolloverStepKey, updateDateStepKey); + return Arrays.asList(rolloverReadyStepKey, rolloverStepKey, updateDateStepKey); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverStep.java index 399f90df31dae..1bdbea03df964 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverStep.java @@ -5,43 +5,28 @@ */ package 
org.elasticsearch.xpack.core.indexlifecycle; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.ToXContentObject; -import org.elasticsearch.common.xcontent.XContentBuilder; -import java.io.IOException; import java.util.Locale; import java.util.Objects; -public class RolloverStep extends AsyncWaitStep { - public static final String NAME = "attempt_rollover"; - - private static final Logger logger = LogManager.getLogger(RolloverStep.class); - - private ByteSizeValue maxSize; - private TimeValue maxAge; - private Long maxDocs; +/** + * Unconditionally rolls over an index using the Rollover API. + */ +public class RolloverStep extends AsyncActionStep { + public static final String NAME = "attempt-rollover"; - public RolloverStep(StepKey key, StepKey nextStepKey, Client client, ByteSizeValue maxSize, TimeValue maxAge, - Long maxDocs) { + public RolloverStep(StepKey key, StepKey nextStepKey, Client client) { super(key, nextStepKey, client); - this.maxSize = maxSize; - this.maxAge = maxAge; - this.maxDocs = maxDocs; } @Override - public void evaluateCondition(IndexMetaData indexMetaData, Listener listener) { + public void performAction(IndexMetaData indexMetaData, ClusterState currentClusterState, Listener listener) { String rolloverAlias = RolloverAction.LIFECYCLE_ROLLOVER_ALIAS_SETTING.get(indexMetaData.getSettings()); if (Strings.isNullOrEmpty(rolloverAlias)) { @@ -58,49 +43,19 @@ public void evaluateCondition(IndexMetaData indexMetaData, Listener listener) { return; } + // Calling rollover with no conditions will always roll over the index RolloverRequest rolloverRequest = new RolloverRequest(rolloverAlias, null); - if (maxAge != null) { - rolloverRequest.addMaxIndexAgeCondition(maxAge); - } - if (maxSize != null) { - rolloverRequest.addMaxIndexSizeCondition(maxSize); - } - if (maxDocs != null) { - rolloverRequest.addMaxIndexDocsCondition(maxDocs); - } getClient().admin().indices().rolloverIndex(rolloverRequest, - ActionListener.wrap(response -> listener.onResponse(response.isRolledOver(), new EmptyInfo()), exception -> { - if (exception instanceof ResourceAlreadyExistsException) { - // This can happen sometimes when this step is executed multiple times - if (logger.isTraceEnabled()) { - logger.debug(() -> new ParameterizedMessage("{} index cannot roll over because the next index already exists, " + - "skipping to next step", indexMetaData.getIndex()), exception); - } else { - logger.debug("{} index cannot roll over because the next index already exists, skipping to next step", - indexMetaData.getIndex()); - } - listener.onResponse(true, new EmptyInfo()); - } else { - listener.onFailure(exception); - } - })); + ActionListener.wrap(response -> { + assert response.isRolledOver() : "the only way this rollover call should fail is with an exception"; + listener.onResponse(response.isRolledOver()); + }, listener::onFailure)); } - ByteSizeValue getMaxSize() { - return maxSize; - } - - TimeValue 
getMaxAge() { - return maxAge; - } - - Long getMaxDocs() { - return maxDocs; - } @Override public int hashCode() { - return Objects.hash(super.hashCode(), maxSize, maxAge, maxDocs); + return Objects.hash(super.hashCode()); } @Override @@ -112,19 +67,6 @@ public boolean equals(Object obj) { return false; } RolloverStep other = (RolloverStep) obj; - return super.equals(obj) && - Objects.equals(maxSize, other.maxSize) && - Objects.equals(maxAge, other.maxAge) && - Objects.equals(maxDocs, other.maxDocs); - } - - // We currently have no information to provide for this AsyncWaitStep, so this is an empty object - private class EmptyInfo implements ToXContentObject { - private EmptyInfo() {} - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return builder; - } + return super.equals(obj); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/UpdateRolloverLifecycleDateStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/UpdateRolloverLifecycleDateStep.java index 704d122f571a6..e897578c87e29 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/UpdateRolloverLifecycleDateStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/UpdateRolloverLifecycleDateStep.java @@ -32,8 +32,8 @@ public ClusterState performAction(Index index, ClusterState currentState) { } RolloverInfo rolloverInfo = indexMetaData.getRolloverInfos().get(rolloverAlias); if (rolloverInfo == null) { - throw new IllegalStateException("no rollover info found for [" + indexMetaData.getIndex().getName() + "], either the index " + - "has not yet rolled over or a subsequent index was created outside of Index Lifecycle Management"); + throw new IllegalStateException("no rollover info found for [" + indexMetaData.getIndex().getName() + "] with alias [" + + rolloverAlias + "], the index has not yet rolled over with that alias"); } LifecycleExecutionState.Builder newLifecycleState = LifecycleExecutionState diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForRolloverReadyStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForRolloverReadyStep.java new file mode 100644 index 0000000000000..fa9e59985071a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForRolloverReadyStep.java @@ -0,0 +1,118 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Locale; +import java.util.Objects; + +/** + * Waits for at least one rollover condition to be satisfied, using the Rollover API's dry_run option. 
+ */ +public class WaitForRolloverReadyStep extends AsyncWaitStep { + + public static final String NAME = "check-rollover-ready"; + + private final ByteSizeValue maxSize; + private final TimeValue maxAge; + private final Long maxDocs; + + public WaitForRolloverReadyStep(StepKey key, StepKey nextStepKey, Client client, ByteSizeValue maxSize, TimeValue maxAge, + Long maxDocs) { + super(key, nextStepKey, client); + this.maxSize = maxSize; + this.maxAge = maxAge; + this.maxDocs = maxDocs; + } + + @Override + public void evaluateCondition(IndexMetaData indexMetaData, Listener listener) { + String rolloverAlias = RolloverAction.LIFECYCLE_ROLLOVER_ALIAS_SETTING.get(indexMetaData.getSettings()); + + if (Strings.isNullOrEmpty(rolloverAlias)) { + listener.onFailure(new IllegalArgumentException(String.format(Locale.ROOT, + "setting [%s] for index [%s] is empty or not defined", RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, + indexMetaData.getIndex().getName()))); + return; + } + + if (indexMetaData.getAliases().containsKey(rolloverAlias) == false) { + listener.onFailure(new IllegalArgumentException(String.format(Locale.ROOT, + "%s [%s] does not point to index [%s]", RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, rolloverAlias, + indexMetaData.getIndex().getName()))); + return; + } + + RolloverRequest rolloverRequest = new RolloverRequest(rolloverAlias, null); + rolloverRequest.dryRun(true); + if (maxAge != null) { + rolloverRequest.addMaxIndexAgeCondition(maxAge); + } + if (maxSize != null) { + rolloverRequest.addMaxIndexSizeCondition(maxSize); + } + if (maxDocs != null) { + rolloverRequest.addMaxIndexDocsCondition(maxDocs); + } + getClient().admin().indices().rolloverIndex(rolloverRequest, + ActionListener.wrap(response -> listener.onResponse(response.getConditionStatus().values().stream().anyMatch(i -> i), + new WaitForRolloverReadyStep.EmptyInfo()), listener::onFailure)); + } + + ByteSizeValue getMaxSize() { + return maxSize; + } + + TimeValue getMaxAge() { + return maxAge; + } + + Long getMaxDocs() { + return maxDocs; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), maxSize, maxAge, maxDocs); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + WaitForRolloverReadyStep other = (WaitForRolloverReadyStep) obj; + return super.equals(obj) && + Objects.equals(maxSize, other.maxSize) && + Objects.equals(maxAge, other.maxAge) && + Objects.equals(maxDocs, other.maxDocs); + } + + // We currently have no information to provide for this AsyncWaitStep, so this is an empty object + private class EmptyInfo implements ToXContentObject { + private EmptyInfo() { + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder; + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java index b5aac1e62256e..29956fcc259b0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java @@ -84,6 +84,7 @@ public class DatafeedConfig extends AbstractDiffable implements public static final ParseField SOURCE = new ParseField("_source"); public static final ParseField CHUNKING_CONFIG = new ParseField("chunking_config"); public static final 
ParseField HEADERS = new ParseField("headers"); + public static final ParseField DELAYED_DATA_CHECK_CONFIG = new ParseField("delayed_data_check_config"); // These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly public static final ObjectParser LENIENT_PARSER = createParser(true); @@ -124,7 +125,9 @@ private static ObjectParser createParser(boolean ignoreUnknownFie // (For config, headers are explicitly transferred from the auth headers by code in the put/update datafeed actions.) parser.declareObject(Builder::setHeaders, (p, c) -> p.mapStrings(), HEADERS); } - + parser.declareObject(Builder::setDelayedDataCheckConfig, + ignoreUnknownFields ? DelayedDataCheckConfig.LENIENT_PARSER : DelayedDataCheckConfig.STRICT_PARSER, + DELAYED_DATA_CHECK_CONFIG); return parser; } @@ -149,10 +152,12 @@ private static ObjectParser createParser(boolean ignoreUnknownFie private final Integer scrollSize; private final ChunkingConfig chunkingConfig; private final Map headers; + private final DelayedDataCheckConfig delayedDataCheckConfig; private DatafeedConfig(String id, String jobId, TimeValue queryDelay, TimeValue frequency, List indices, List types, QueryBuilder query, AggregatorFactories.Builder aggregations, List scriptFields, - Integer scrollSize, ChunkingConfig chunkingConfig, Map headers) { + Integer scrollSize, ChunkingConfig chunkingConfig, Map headers, + DelayedDataCheckConfig delayedDataCheckConfig) { this.id = id; this.jobId = jobId; this.queryDelay = queryDelay; @@ -165,6 +170,7 @@ private DatafeedConfig(String id, String jobId, TimeValue queryDelay, TimeValue this.scrollSize = scrollSize; this.chunkingConfig = chunkingConfig; this.headers = Collections.unmodifiableMap(headers); + this.delayedDataCheckConfig = delayedDataCheckConfig; } public DatafeedConfig(StreamInput in) throws IOException { @@ -196,6 +202,11 @@ public DatafeedConfig(StreamInput in) throws IOException { } else { this.headers = Collections.emptyMap(); } + if (in.getVersion().onOrAfter(Version.V_6_6_0)) { + delayedDataCheckConfig = in.readOptionalWriteable(DelayedDataCheckConfig::new); + } else { + delayedDataCheckConfig = DelayedDataCheckConfig.defaultDelayedDataCheckConfig(); + } } public String getId() { @@ -260,6 +271,10 @@ public Map getHeaders() { return headers; } + public DelayedDataCheckConfig getDelayedDataCheckConfig() { + return delayedDataCheckConfig; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(id); @@ -291,6 +306,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_6_2_0)) { out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString); } + if (out.getVersion().onOrAfter(Version.V_6_6_0)) { + out.writeOptionalWriteable(delayedDataCheckConfig); + } } @Override @@ -328,6 +346,9 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th if (headers.isEmpty() == false && params.paramAsBoolean(ToXContentParams.FOR_CLUSTER_STATE, false) == true) { builder.field(HEADERS.getPreferredName(), headers); } + if (delayedDataCheckConfig != null) { + builder.field(DELAYED_DATA_CHECK_CONFIG.getPreferredName(), delayedDataCheckConfig); + } return builder; } @@ -359,13 +380,14 @@ public boolean equals(Object other) { && Objects.equals(this.aggregations, that.aggregations) && Objects.equals(this.scriptFields, that.scriptFields) && Objects.equals(this.chunkingConfig, that.chunkingConfig) - && Objects.equals(this.headers, 
that.headers); + && Objects.equals(this.headers, that.headers) + && Objects.equals(this.delayedDataCheckConfig, that.delayedDataCheckConfig); } @Override public int hashCode() { return Objects.hash(id, jobId, frequency, queryDelay, indices, types, query, scrollSize, aggregations, scriptFields, - chunkingConfig, headers); + chunkingConfig, headers, delayedDataCheckConfig); } @Override @@ -438,6 +460,7 @@ public static class Builder { private Integer scrollSize = DEFAULT_SCROLL_SIZE; private ChunkingConfig chunkingConfig; private Map headers = Collections.emptyMap(); + private DelayedDataCheckConfig delayedDataCheckConfig = DelayedDataCheckConfig.defaultDelayedDataCheckConfig(); public Builder() { } @@ -461,6 +484,7 @@ public Builder(DatafeedConfig config) { this.scrollSize = config.scrollSize; this.chunkingConfig = config.chunkingConfig; this.headers = config.headers; + this.delayedDataCheckConfig = config.getDelayedDataCheckConfig(); } public void setId(String datafeedId) { @@ -523,6 +547,10 @@ public void setChunkingConfig(ChunkingConfig chunkingConfig) { this.chunkingConfig = chunkingConfig; } + public void setDelayedDataCheckConfig(DelayedDataCheckConfig delayedDataCheckConfig) { + this.delayedDataCheckConfig = delayedDataCheckConfig; + } + public DatafeedConfig build() { ExceptionsHelper.requireNonNull(id, ID.getPreferredName()); ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName()); @@ -535,11 +563,12 @@ public DatafeedConfig build() { if (types == null || types.contains(null) || types.contains("")) { throw invalidOptionValue(TYPES.getPreferredName(), types); } + validateAggregations(); setDefaultChunkingConfig(); setDefaultQueryDelay(); return new DatafeedConfig(id, jobId, queryDelay, frequency, indices, types, query, aggregations, scriptFields, scrollSize, - chunkingConfig, headers); + chunkingConfig, headers, delayedDataCheckConfig); } void validateAggregations() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedJobValidator.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedJobValidator.java index b829b3fa44307..8a49b9554451f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedJobValidator.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedJobValidator.java @@ -31,6 +31,30 @@ public static void validate(DatafeedConfig datafeedConfig, Job job) { checkValidHistogramInterval(datafeedConfig, analysisConfig); checkFrequencyIsMultipleOfHistogramInterval(datafeedConfig); } + + DelayedDataCheckConfig delayedDataCheckConfig = datafeedConfig.getDelayedDataCheckConfig(); + TimeValue bucketSpan = analysisConfig.getBucketSpan(); + if (delayedDataCheckConfig.isEnabled()) { + checkValidDelayedDataCheckConfig(bucketSpan, delayedDataCheckConfig); + } + } + + private static void checkValidDelayedDataCheckConfig(TimeValue bucketSpan, DelayedDataCheckConfig delayedDataCheckConfig) { + TimeValue delayedDataCheckWindow = delayedDataCheckConfig.getCheckWindow(); + if (delayedDataCheckWindow != null) { // NULL implies we calculate on use and thus is always valid + if (delayedDataCheckWindow.compareTo(bucketSpan) < 0) { + throw ExceptionsHelper.badRequestException( + Messages.getMessage(Messages.DATAFEED_CONFIG_DELAYED_DATA_CHECK_TOO_SMALL, + delayedDataCheckWindow, + bucketSpan)); + } + if (delayedDataCheckWindow.millis() > bucketSpan.millis() * DelayedDataCheckConfig.MAX_NUMBER_SPANABLE_BUCKETS) { + throw 
ExceptionsHelper.badRequestException( + Messages.getMessage(Messages.DATAFEED_CONFIG_DELAYED_DATA_CHECK_SPANS_TOO_MANY_BUCKETS, + delayedDataCheckWindow, + bucketSpan)); + } + } } private static void checkSummaryCountFieldNameIsSet(AnalysisConfig analysisConfig) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java index d5425bdd1f469..25a97d081e683 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.core.ml.datafeed; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -68,6 +69,9 @@ public class DatafeedUpdate implements Writeable, ToXContentObject { }, DatafeedConfig.SCRIPT_FIELDS); PARSER.declareInt(Builder::setScrollSize, DatafeedConfig.SCROLL_SIZE); PARSER.declareObject(Builder::setChunkingConfig, ChunkingConfig.STRICT_PARSER, DatafeedConfig.CHUNKING_CONFIG); + PARSER.declareObject(Builder::setDelayedDataCheckConfig, + DelayedDataCheckConfig.STRICT_PARSER, + DatafeedConfig.DELAYED_DATA_CHECK_CONFIG); } private final String id; @@ -81,10 +85,11 @@ public class DatafeedUpdate implements Writeable, ToXContentObject { private final List scriptFields; private final Integer scrollSize; private final ChunkingConfig chunkingConfig; + private final DelayedDataCheckConfig delayedDataCheckConfig; private DatafeedUpdate(String id, String jobId, TimeValue queryDelay, TimeValue frequency, List indices, List types, QueryBuilder query, AggregatorFactories.Builder aggregations, List scriptFields, - Integer scrollSize, ChunkingConfig chunkingConfig) { + Integer scrollSize, ChunkingConfig chunkingConfig, DelayedDataCheckConfig delayedDataCheckConfig) { this.id = id; this.jobId = jobId; this.queryDelay = queryDelay; @@ -96,6 +101,7 @@ private DatafeedUpdate(String id, String jobId, TimeValue queryDelay, TimeValue this.scriptFields = scriptFields; this.scrollSize = scrollSize; this.chunkingConfig = chunkingConfig; + this.delayedDataCheckConfig = delayedDataCheckConfig; } public DatafeedUpdate(StreamInput in) throws IOException { @@ -122,6 +128,11 @@ public DatafeedUpdate(StreamInput in) throws IOException { } this.scrollSize = in.readOptionalVInt(); this.chunkingConfig = in.readOptionalWriteable(ChunkingConfig::new); + if (in.getVersion().onOrAfter(Version.V_6_6_0)) { + delayedDataCheckConfig = in.readOptionalWriteable(DelayedDataCheckConfig::new); + } else { + delayedDataCheckConfig = null; + } } /** @@ -159,6 +170,9 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeOptionalVInt(scrollSize); out.writeOptionalWriteable(chunkingConfig); + if (out.getVersion().onOrAfter(Version.V_6_6_0)) { + out.writeOptionalWriteable(delayedDataCheckConfig); + } } @Override @@ -185,6 +199,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } addOptionalField(builder, DatafeedConfig.SCROLL_SIZE, scrollSize); addOptionalField(builder, DatafeedConfig.CHUNKING_CONFIG, chunkingConfig); + addOptionalField(builder, DatafeedConfig.DELAYED_DATA_CHECK_CONFIG, delayedDataCheckConfig); builder.endObject(); return builder; } @@ -250,6 +265,10 @@ ChunkingConfig getChunkingConfig() { return chunkingConfig; } 
+ public DelayedDataCheckConfig getDelayedDataCheckConfig() { + return delayedDataCheckConfig; + } + /** * Applies the update to the given {@link DatafeedConfig} * @return a new {@link DatafeedConfig} that contains the update @@ -290,6 +309,9 @@ public DatafeedConfig apply(DatafeedConfig datafeedConfig, Map h if (chunkingConfig != null) { builder.setChunkingConfig(chunkingConfig); } + if (delayedDataCheckConfig != null) { + builder.setDelayedDataCheckConfig(delayedDataCheckConfig); + } if (headers.isEmpty() == false) { // Adjust the request, adding security headers from the current thread context @@ -328,6 +350,7 @@ public boolean equals(Object other) { && Objects.equals(this.query, that.query) && Objects.equals(this.scrollSize, that.scrollSize) && Objects.equals(this.aggregations, that.aggregations) + && Objects.equals(this.delayedDataCheckConfig, that.delayedDataCheckConfig) && Objects.equals(this.scriptFields, that.scriptFields) && Objects.equals(this.chunkingConfig, that.chunkingConfig); } @@ -335,7 +358,7 @@ public boolean equals(Object other) { @Override public int hashCode() { return Objects.hash(id, jobId, frequency, queryDelay, indices, types, query, scrollSize, aggregations, scriptFields, - chunkingConfig); + chunkingConfig, delayedDataCheckConfig); } @Override @@ -352,6 +375,7 @@ boolean isNoop(DatafeedConfig datafeed) { && (scrollSize == null || Objects.equals(scrollSize, datafeed.getQueryDelay())) && (aggregations == null || Objects.equals(aggregations, datafeed.getAggregations())) && (scriptFields == null || Objects.equals(scriptFields, datafeed.getScriptFields())) + && (delayedDataCheckConfig == null || Objects.equals(delayedDataCheckConfig, datafeed.getDelayedDataCheckConfig())) && (chunkingConfig == null || Objects.equals(chunkingConfig, datafeed.getChunkingConfig())); } @@ -368,6 +392,7 @@ public static class Builder { private List scriptFields; private Integer scrollSize; private ChunkingConfig chunkingConfig; + private DelayedDataCheckConfig delayedDataCheckConfig; public Builder() { } @@ -388,6 +413,7 @@ public Builder(DatafeedUpdate config) { this.scriptFields = config.scriptFields; this.scrollSize = config.scrollSize; this.chunkingConfig = config.chunkingConfig; + this.delayedDataCheckConfig = config.delayedDataCheckConfig; } public void setId(String datafeedId) { @@ -428,6 +454,10 @@ public void setScriptFields(List scriptFields) this.scriptFields = sorted; } + public void setDelayedDataCheckConfig(DelayedDataCheckConfig delayedDataCheckConfig) { + this.delayedDataCheckConfig = delayedDataCheckConfig; + } + public void setScrollSize(int scrollSize) { this.scrollSize = scrollSize; } @@ -438,7 +468,7 @@ public void setChunkingConfig(ChunkingConfig chunkingConfig) { public DatafeedUpdate build() { return new DatafeedUpdate(id, jobId, queryDelay, frequency, indices, types, query, aggregations, scriptFields, scrollSize, - chunkingConfig); + chunkingConfig, delayedDataCheckConfig); } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DelayedDataCheckConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DelayedDataCheckConfig.java new file mode 100644 index 0000000000000..9406b91d119c7 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DelayedDataCheckConfig.java @@ -0,0 +1,127 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.datafeed; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; + +import java.io.IOException; +import java.util.Objects; + +public class DelayedDataCheckConfig implements ToXContentObject, Writeable { + + public static final TimeValue MAX_DELAYED_DATA_WINDOW = TimeValue.timeValueHours(24); + public static final int MAX_NUMBER_SPANABLE_BUCKETS = 10_000; + + public static final ParseField ENABLED = new ParseField("enabled"); + public static final ParseField CHECK_WINDOW = new ParseField("check_window"); + + // These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly + public static final ConstructingObjectParser<DelayedDataCheckConfig, Void> LENIENT_PARSER = createParser(true); + public static final ConstructingObjectParser<DelayedDataCheckConfig, Void> STRICT_PARSER = createParser(false); + + private static ConstructingObjectParser<DelayedDataCheckConfig, Void> createParser(boolean ignoreUnknownFields) { + ConstructingObjectParser<DelayedDataCheckConfig, Void> parser = new ConstructingObjectParser<>( + "delayed_data_check_config", ignoreUnknownFields, a -> new DelayedDataCheckConfig((Boolean) a[0], (TimeValue) a[1])); + + parser.declareBoolean(ConstructingObjectParser.constructorArg(), ENABLED); + parser.declareField(ConstructingObjectParser.optionalConstructorArg(), p -> { + if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return TimeValue.parseTimeValue(p.text(), CHECK_WINDOW.getPreferredName()); + } + throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); + }, CHECK_WINDOW, ObjectParser.ValueType.STRING); + + return parser; + } + + public static DelayedDataCheckConfig defaultDelayedDataCheckConfig() { + return new DelayedDataCheckConfig(true, null); + } + + public static DelayedDataCheckConfig enabledDelayedDataCheckConfig(TimeValue timeValue) { + return new DelayedDataCheckConfig(true, timeValue); + } + + public static DelayedDataCheckConfig disabledDelayedDataCheckConfig() { + return new DelayedDataCheckConfig(false, null); + } + + private final boolean enabled; + private final TimeValue checkWindow; + + DelayedDataCheckConfig(Boolean enabled, TimeValue checkWindow) { + this.enabled = enabled; + if (enabled && checkWindow != null) { + TimeUtils.checkPositive(checkWindow, CHECK_WINDOW); + if (checkWindow.compareTo(MAX_DELAYED_DATA_WINDOW) > 0) { + throw new IllegalArgumentException("check_window [" + checkWindow.getStringRep() + "] must be less than or equal to [24h]"); + } + } + this.checkWindow = checkWindow; + } + + public DelayedDataCheckConfig(StreamInput in) throws IOException { + enabled = in.readBoolean(); + checkWindow = in.readOptionalTimeValue(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeBoolean(enabled);
out.writeOptionalTimeValue(checkWindow); + } + + public boolean isEnabled() { + return enabled; + } + + @Nullable + public TimeValue getCheckWindow() { + return checkWindow; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + builder.field(ENABLED.getPreferredName(), enabled); + if (checkWindow != null) { + builder.field(CHECK_WINDOW.getPreferredName(), checkWindow.getStringRep()); + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(enabled, checkWindow); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + DelayedDataCheckConfig other = (DelayedDataCheckConfig) obj; + return Objects.equals(this.enabled, other.enabled) && Objects.equals(this.checkWindow, other.checkWindow); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java index b669e8f1edcfb..4792180ec51e9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java @@ -22,6 +22,13 @@ public final class Messages { public static final String DATAFEED_CONFIG_CANNOT_USE_SCRIPT_FIELDS_WITH_AGGS = "script_fields cannot be used in combination with aggregations"; public static final String DATAFEED_CONFIG_INVALID_OPTION_VALUE = "Invalid {0} value ''{1}'' in datafeed configuration"; + public static final String DATAFEED_CONFIG_DELAYED_DATA_CHECK_TOO_SMALL = + "delayed_data_check_window [{0}] must be greater than the bucket_span [{1}]"; + public static final String DATAFEED_CONFIG_DELAYED_DATA_CHECK_TOO_LARGE = + "delayed_data_check_window [{0}] must be less than or equal to [24h]"; + public static final String DATAFEED_CONFIG_DELAYED_DATA_CHECK_SPANS_TOO_MANY_BUCKETS = + "delayed_data_check_window [{0}] must be less than 10,000x the bucket_span [{1}]"; + public static final String DATAFEED_DOES_NOT_SUPPORT_JOB_WITH_LATENCY = "A job configured with datafeed cannot support latency"; public static final String DATAFEED_NOT_FOUND = "No datafeed with id [{0}] exists"; public static final String DATAFEED_AGGREGATIONS_REQUIRES_DATE_HISTOGRAM = @@ -63,6 +70,9 @@ public final class Messages { public static final String JOB_AUDIT_DATAFEED_LOOKBACK_COMPLETED = "Datafeed lookback completed"; public static final String JOB_AUDIT_DATAFEED_LOOKBACK_NO_DATA = "Datafeed lookback retrieved no data"; public static final String JOB_AUDIT_DATAFEED_NO_DATA = "Datafeed has been retrieving no data for a while"; + public static final String JOB_AUDIT_DATAFEED_MISSING_DATA = + "Datafeed has missed {0} documents due to ingest latency, latest bucket with missing data is [{1}]." 
+ + " Consider increasing query_delay"; public static final String JOB_AUDIT_DATAFEED_RECOVERED = "Datafeed has recovered data extraction and analysis"; public static final String JOB_AUDIT_DATAFEED_STARTED_FROM_TO = "Datafeed started (from: {0} to: {1}) with frequency [{2}]"; public static final String JOB_AUDIT_DATAFEED_STARTED_REALTIME = "Datafeed started in real-time"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/time/TimeUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/time/TimeUtils.java index 019668f1a3c89..bebcc0a6ec368 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/time/TimeUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/time/TimeUtils.java @@ -87,21 +87,31 @@ public static void checkPositiveMultiple(TimeValue timeValue, TimeUnit baseUnit, checkMultiple(timeValue, baseUnit, field); } - private static void checkNonNegative(TimeValue timeValue, ParseField field) { + /** + * Checks that the given {@code timeValue} is positive. + * + *
    + *
  • 1s is valid
  • + *
  • -1s is invalid
  • + *
+ */ + public static void checkPositive(TimeValue timeValue, ParseField field) { long nanos = timeValue.getNanos(); - if (nanos < 0) { - throw new IllegalArgumentException(field.getPreferredName() + " cannot be less than 0. Value = " + timeValue.toString()); + if (nanos <= 0) { + throw new IllegalArgumentException(field.getPreferredName() + " cannot be less or equal than 0. Value = " + + timeValue.toString()); } } - private static void checkPositive(TimeValue timeValue, ParseField field) { + private static void checkNonNegative(TimeValue timeValue, ParseField field) { long nanos = timeValue.getNanos(); - if (nanos <= 0) { - throw new IllegalArgumentException(field.getPreferredName() + " cannot be less or equal than 0. Value = " - + timeValue.toString()); + if (nanos < 0) { + throw new IllegalArgumentException(field.getPreferredName() + " cannot be less than 0. Value = " + timeValue.toString()); } } + + /** * Check the given {@code timeValue} is a multiple of the {@code baseUnit} */ diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StopRollupJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StopRollupJobAction.java index fbcb1bf1571f1..361f81ab130e8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StopRollupJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StopRollupJobAction.java @@ -79,8 +79,7 @@ public boolean waitForCompletion() { public void readFrom(StreamInput in) throws IOException { super.readFrom(in); id = in.readString(); - // TODO change this after backport - if (in.getVersion().onOrAfter(Version.CURRENT)) { + if (in.getVersion().onOrAfter(Version.V_6_6_0)) { waitForCompletion = in.readBoolean(); timeout = in.readTimeValue(); } @@ -90,8 +89,7 @@ public void readFrom(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(id); - // TODO change this after backport - if (out.getVersion().onOrAfter(Version.CURRENT)) { + if (out.getVersion().onOrAfter(Version.V_6_6_0)) { out.writeBoolean(waitForCompletion); out.writeTimeValue(timeout); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/DefaultAuthenticationFailureHandler.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/DefaultAuthenticationFailureHandler.java index 736b9378e3876..4f548f80dd283 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/DefaultAuthenticationFailureHandler.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/DefaultAuthenticationFailureHandler.java @@ -14,6 +14,7 @@ import java.util.ArrayList; import java.util.Collections; +import java.util.Comparator; import java.util.List; import java.util.Map; import java.util.stream.Collectors; @@ -28,16 +29,6 @@ public class DefaultAuthenticationFailureHandler implements AuthenticationFailureHandler { private final Map> defaultFailureResponseHeaders; - /** - * Constructs default authentication failure handler - * - * @deprecated replaced by {@link #DefaultAuthenticationFailureHandler(Map)} - */ - @Deprecated - public DefaultAuthenticationFailureHandler() { - this(null); - } - /** * Constructs default authentication failure handler with provided default * response headers. 
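The StopRollupJobAction hunk above swaps the temporary Version.CURRENT guard (left in place while the change awaited backport, as the removed TODO comments note) for the concrete backport version, V_6_6_0. The read and write sides must be gated on the same version, otherwise a mixed-version cluster would mis-frame the stream. A minimal sketch of the idiom, restating the fields from the hunk in a self-contained class whose name is invented for illustration:

import java.io.IOException;
import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.TimeValue;

// Wire-compatibility sketch: fields introduced in 6.6.0 cross the wire only
// when the node on the other side of the stream is on 6.6.0 or later.
class VersionGatedRequestSketch {
    private String id;
    private boolean waitForCompletion;
    private TimeValue timeout;

    void readFrom(StreamInput in) throws IOException {
        id = in.readString();                              // understood by every version
        if (in.getVersion().onOrAfter(Version.V_6_6_0)) {  // sender serialized the new fields
            waitForCompletion = in.readBoolean();
            timeout = in.readTimeValue();
        }                                                  // otherwise keep the defaults
    }

    void writeTo(StreamOutput out) throws IOException {
        out.writeString(id);
        if (out.getVersion().onOrAfter(Version.V_6_6_0)) { // receiver can parse the new fields
            out.writeBoolean(waitForCompletion);
            out.writeTimeValue(timeout);
        }
    }
}

Version.CURRENT is only a safe placeholder on the development branch; once the backport lands, it has to be replaced with the first release that actually understands the new fields, which is exactly what this hunk does.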
@@ -55,7 +46,7 @@ public DefaultAuthenticationFailureHandler(final Map> failu .toMap(entry -> entry.getKey(), entry -> { if (entry.getKey().equalsIgnoreCase("WWW-Authenticate")) { List values = new ArrayList<>(entry.getValue()); - Collections.sort(values, (o1, o2) -> authSchemePriority(o1).compareTo(authSchemePriority(o2))); + values.sort(Comparator.comparing(DefaultAuthenticationFailureHandler::authSchemePriority)); return Collections.unmodifiableList(values); } else { return Collections.unmodifiableList(entry.getValue()); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java index 24b17976f4c9e..cbfc36c185e88 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java @@ -118,7 +118,9 @@ private static Map initializeReservedRoles() { RoleDescriptor.IndicesPrivileges.builder() .indices(".monitoring-*").privileges("read", "read_cross_cluster").build(), RoleDescriptor.IndicesPrivileges.builder() - .indices(".management-beats").privileges("create_index", "read", "write").build() + .indices(".management-beats").privileges("create_index", "read", "write").build(), + RoleDescriptor.IndicesPrivileges.builder() + .indices(".tasks").privileges("create_index", "read", "create").build() }, null, new ConditionalClusterPrivilege[] { new ManageApplicationPrivileges(Collections.singleton("kibana-*")) }, diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/AbstractHlrcStreamableXContentTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/AbstractHlrcStreamableXContentTestCase.java index 5efcb7c824573..d471a1a03f8ef 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/AbstractHlrcStreamableXContentTestCase.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/AbstractHlrcStreamableXContentTestCase.java @@ -44,7 +44,7 @@ public final void testHlrcFromXContent() throws IOException { public abstract T convertHlrcToInternal(H instance); //TODO this would be final ideally: why do both responses need to parse from xcontent, only one (H) should? I think that T#fromXContent - //are only there for testing and could go away? + //are only there for testing and could go away? Then the additional testHlrcFromXContent is also no longer needed. 
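The DefaultAuthenticationFailureHandler change above also replaces a hand-written Collections.sort comparator with values.sort(Comparator.comparing(...)) over a method reference, keeping the WWW-Authenticate values ordered by scheme priority. A self-contained illustration of the same idiom follows; the priority table here is invented for the example and the handler's real authSchemePriority may rank schemes differently:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.Locale;

public class AuthSchemeSortSketch {
    // Invented ranking for the example: lower rank sorts first.
    private static Integer rank(String headerValue) {
        String v = headerValue.toLowerCase(Locale.ROOT);
        if (v.startsWith("negotiate")) {
            return 0;
        } else if (v.startsWith("bearer")) {
            return 1;
        } else if (v.startsWith("basic")) {
            return 2;
        }
        return 3;
    }

    public static void main(String[] args) {
        List<String> values = new ArrayList<>(Arrays.asList(
                "Basic realm=\"security\"", "Bearer realm=\"security\"", "Negotiate"));
        // Equivalent to: Collections.sort(values, (o1, o2) -> rank(o1).compareTo(rank(o2)));
        values.sort(Comparator.comparing(AuthSchemeSortSketch::rank));
        // Prints: [Negotiate, Bearer realm="security", Basic realm="security"]
        System.out.println(values);
    }
}

Both forms produce the same stable ordering; the method-reference form just names the sort key once instead of spelling out the comparison.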
@Override protected T doParseInstance(XContentParser parser) throws IOException { return convertHlrcToInternal(doHlrcParseInstance(parser)); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/XPackInfoResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/XPackInfoResponseTests.java index 1e77d6a83f2ed..8ddc98544a87e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/XPackInfoResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/XPackInfoResponseTests.java @@ -26,11 +26,6 @@ public class XPackInfoResponseTests extends AbstractHlrcStreamableXContentTestCase { - @Override - protected XPackInfoResponse doParseInstance(XContentParser parser) throws IOException { - return XPackInfoResponse.fromXContent(parser); - } - @Override protected XPackInfoResponse createBlankInstance() { return new XPackInfoResponse(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/CopyExecutionStateStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/CopyExecutionStateStepTests.java index 40dd022c05de6..ef1655d99bc0c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/CopyExecutionStateStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/CopyExecutionStateStepTests.java @@ -17,6 +17,7 @@ import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY; import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionStateTests.createCustomMetadata; +import static org.hamcrest.Matchers.equalTo; public class CopyExecutionStateStepTests extends AbstractStepTestCase { @Override @@ -86,4 +87,25 @@ public void testPerformAction() { assertEquals(oldIndexData.getAction(), newIndexData.getAction()); assertEquals(ShrunkenIndexCheckStep.NAME, newIndexData.getStep()); } + public void testPerformActionWithNoTarget() { + CopyExecutionStateStep step = createRandomInstance(); + String indexName = randomAlphaOfLengthBetween(5, 20); + Map customMetadata = createCustomMetadata(); + + IndexMetaData originalIndexMetaData = IndexMetaData.builder(indexName) + .settings(settings(Version.CURRENT)).numberOfShards(randomIntBetween(1,5)) + .numberOfReplicas(randomIntBetween(1,5)) + .putCustom(ILM_CUSTOM_METADATA_KEY, customMetadata) + .build(); + ClusterState originalClusterState = ClusterState.builder(ClusterName.DEFAULT) + .metaData(MetaData.builder() + .put(originalIndexMetaData, false)) + .build(); + + IllegalStateException e = expectThrows(IllegalStateException.class, + () -> step.performAction(originalIndexMetaData.getIndex(), originalClusterState)); + + assertThat(e.getMessage(), equalTo("unable to copy execution state from [" + + indexName + "] to [" + step.getShrunkIndexPrefix() + indexName + "] as target index does not exist")); + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverActionTests.java index f13a09ac7476e..55cbad7144b4f 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverActionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverActionTests.java @@ -80,17 +80,21 @@ public void testToSteps() { randomAlphaOfLengthBetween(1, 10)); List steps = 
action.toSteps(null, phase, nextStepKey); assertNotNull(steps); - assertEquals(2, steps.size()); - StepKey expectedFirstStepKey = new StepKey(phase, RolloverAction.NAME, RolloverStep.NAME); - StepKey expectedSecondStepKey = new StepKey(phase, RolloverAction.NAME, UpdateRolloverLifecycleDateStep.NAME); - RolloverStep firstStep = (RolloverStep) steps.get(0); - UpdateRolloverLifecycleDateStep secondStep = (UpdateRolloverLifecycleDateStep) steps.get(1); + assertEquals(3, steps.size()); + StepKey expectedFirstStepKey = new StepKey(phase, RolloverAction.NAME, WaitForRolloverReadyStep.NAME); + StepKey expectedSecondStepKey = new StepKey(phase, RolloverAction.NAME, RolloverStep.NAME); + StepKey expectedThirdStepKey = new StepKey(phase, RolloverAction.NAME, UpdateRolloverLifecycleDateStep.NAME); + WaitForRolloverReadyStep firstStep = (WaitForRolloverReadyStep) steps.get(0); + RolloverStep secondStep = (RolloverStep) steps.get(1); + UpdateRolloverLifecycleDateStep thirdStep = (UpdateRolloverLifecycleDateStep) steps.get(2); assertEquals(expectedFirstStepKey, firstStep.getKey()); assertEquals(expectedSecondStepKey, secondStep.getKey()); + assertEquals(expectedThirdStepKey, thirdStep.getKey()); assertEquals(secondStep.getKey(), firstStep.getNextStepKey()); + assertEquals(thirdStep.getKey(), secondStep.getNextStepKey()); assertEquals(action.getMaxSize(), firstStep.getMaxSize()); assertEquals(action.getMaxAge(), firstStep.getMaxAge()); assertEquals(action.getMaxDocs(), firstStep.getMaxDocs()); - assertEquals(nextStepKey, secondStep.getNextStepKey()); + assertEquals(nextStepKey, thirdStep.getNextStepKey()); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverStepTests.java index 596099e6e275b..d31643e51959b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverStepTests.java @@ -8,10 +8,6 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.indices.rollover.Condition; -import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition; -import org.elasticsearch.action.admin.indices.rollover.MaxDocsCondition; -import org.elasticsearch.action.admin.indices.rollover.MaxSizeCondition; import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; import org.elasticsearch.client.AdminClient; @@ -19,10 +15,6 @@ import org.elasticsearch.client.IndicesAdminClient; import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.unit.ByteSizeUnit; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; import org.junit.Before; import org.mockito.Mockito; @@ -30,10 +22,7 @@ import org.mockito.stubbing.Answer; import java.util.Collections; -import java.util.HashSet; import java.util.Locale; -import java.util.Set; -import java.util.stream.Collectors; import static org.hamcrest.Matchers.equalTo; @@ -50,65 +39,42 @@ public void setup() { public RolloverStep createRandomInstance() { 
StepKey stepKey = randomStepKey(); StepKey nextStepKey = randomStepKey(); - ByteSizeUnit maxSizeUnit = randomFrom(ByteSizeUnit.values()); - ByteSizeValue maxSize = randomBoolean() ? null : new ByteSizeValue(randomNonNegativeLong() / maxSizeUnit.toBytes(1), maxSizeUnit); - Long maxDocs = randomBoolean() ? null : randomNonNegativeLong(); - TimeValue maxAge = (maxDocs == null && maxSize == null || randomBoolean()) - ? TimeValue.parseTimeValue(randomPositiveTimeValue(), "rollover_action_test") - : null; - return new RolloverStep(stepKey, nextStepKey, client, maxSize, maxAge, maxDocs); + + return new RolloverStep(stepKey, nextStepKey, client); } @Override public RolloverStep mutateInstance(RolloverStep instance) { StepKey key = instance.getKey(); StepKey nextKey = instance.getNextStepKey(); - ByteSizeValue maxSize = instance.getMaxSize(); - TimeValue maxAge = instance.getMaxAge(); - Long maxDocs = instance.getMaxDocs(); - switch (between(0, 4)) { + + switch (between(0, 1)) { case 0: key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); break; case 1: nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); break; - case 2: - maxSize = randomValueOtherThan(maxSize, () -> { - ByteSizeUnit maxSizeUnit = randomFrom(ByteSizeUnit.values()); - return new ByteSizeValue(randomNonNegativeLong() / maxSizeUnit.toBytes(1), maxSizeUnit); - }); - break; - case 3: - maxAge = TimeValue.parseTimeValue(randomPositiveTimeValue(), "rollover_action_test"); - break; - case 4: - maxDocs = randomNonNegativeLong(); - break; default: throw new AssertionError("Illegal randomisation branch"); } - return new RolloverStep(key, nextKey, instance.getClient(), maxSize, maxAge, maxDocs); + return new RolloverStep(key, nextKey, instance.getClient()); } @Override public RolloverStep copyInstance(RolloverStep instance) { - return new RolloverStep(instance.getKey(), instance.getNextStepKey(), instance.getClient(), - instance.getMaxSize(), instance.getMaxAge(), instance.getMaxDocs()); + return new RolloverStep(instance.getKey(), instance.getNextStepKey(), instance.getClient()); } - private static void assertRolloverIndexRequest(RolloverRequest request, String alias, Set> expectedConditions) { + private static void assertRolloverIndexRequest(RolloverRequest request, String alias) { assertNotNull(request); assertEquals(1, request.indices().length); assertEquals(alias, request.indices()[0]); assertEquals(alias, request.getAlias()); - assertEquals(expectedConditions.size(), request.getConditions().size()); - Set expectedConditionValues = expectedConditions.stream().map(Condition::value).collect(Collectors.toSet()); - Set actualConditionValues = request.getConditions().values().stream() - .map(Condition::value).collect(Collectors.toSet()); - assertEquals(expectedConditionValues, actualConditionValues); + assertFalse(request.isDryRun()); + assertEquals(0, request.getConditions().size()); } public void testPerformAction() { @@ -132,17 +98,7 @@ public Void answer(InvocationOnMock invocation) throws Throwable { RolloverRequest request = (RolloverRequest) invocation.getArguments()[0]; @SuppressWarnings("unchecked") ActionListener listener = (ActionListener) invocation.getArguments()[1]; - Set> expectedConditions = new HashSet<>(); - if (step.getMaxAge() != null) { - expectedConditions.add(new MaxAgeCondition(step.getMaxAge())); - } - if (step.getMaxSize() != null) { - expectedConditions.add(new MaxSizeCondition(step.getMaxSize())); - } - if (step.getMaxDocs() != null) 
{ - expectedConditions.add(new MaxDocsCondition(step.getMaxDocs())); - } - assertRolloverIndexRequest(request, alias, expectedConditions); + assertRolloverIndexRequest(request, alias); listener.onResponse(new RolloverResponse(null, null, Collections.emptyMap(), request.isDryRun(), true, true, true)); return null; } @@ -150,10 +106,10 @@ public Void answer(InvocationOnMock invocation) throws Throwable { }).when(indicesClient).rolloverIndex(Mockito.any(), Mockito.any()); SetOnce actionCompleted = new SetOnce<>(); - step.evaluateCondition(indexMetaData, new AsyncWaitStep.Listener() { + step.performAction(indexMetaData, null, new AsyncActionStep.Listener() { @Override - public void onResponse(boolean complete, ToXContentObject obj) { + public void onResponse(boolean complete) { actionCompleted.set(complete); } @@ -170,64 +126,6 @@ public void onFailure(Exception e) { Mockito.verify(indicesClient, Mockito.only()).rolloverIndex(Mockito.any(), Mockito.any()); } - public void testPerformActionNotComplete() { - String alias = randomAlphaOfLength(5); - IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)) - .putAlias(AliasMetaData.builder(alias)) - .settings(settings(Version.CURRENT).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias)) - .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); - RolloverStep step = createRandomInstance(); - - AdminClient adminClient = Mockito.mock(AdminClient.class); - IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class); - - Mockito.when(client.admin()).thenReturn(adminClient); - Mockito.when(adminClient.indices()).thenReturn(indicesClient); - Mockito.doAnswer(new Answer() { - - @Override - public Void answer(InvocationOnMock invocation) throws Throwable { - RolloverRequest request = (RolloverRequest) invocation.getArguments()[0]; - @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[1]; - Set> expectedConditions = new HashSet<>(); - if (step.getMaxAge() != null) { - expectedConditions.add(new MaxAgeCondition(step.getMaxAge())); - } - if (step.getMaxSize() != null) { - expectedConditions.add(new MaxSizeCondition(step.getMaxSize())); - } - if (step.getMaxDocs() != null) { - expectedConditions.add(new MaxDocsCondition(step.getMaxDocs())); - } - assertRolloverIndexRequest(request, alias, expectedConditions); - listener.onResponse(new RolloverResponse(null, null, Collections.emptyMap(), request.isDryRun(), false, true, true)); - return null; - } - - }).when(indicesClient).rolloverIndex(Mockito.any(), Mockito.any()); - - SetOnce actionCompleted = new SetOnce<>(); - step.evaluateCondition(indexMetaData, new AsyncWaitStep.Listener() { - - @Override - public void onResponse(boolean complete, ToXContentObject obj) { - actionCompleted.set(complete); - } - - @Override - public void onFailure(Exception e) { - throw new AssertionError("Unexpected method call", e); - } - }); - - assertEquals(false, actionCompleted.get()); - - Mockito.verify(client, Mockito.only()).admin(); - Mockito.verify(adminClient, Mockito.only()).indices(); - Mockito.verify(indicesClient, Mockito.only()).rolloverIndex(Mockito.any(), Mockito.any()); - } - public void testPerformActionFailure() { String alias = randomAlphaOfLength(5); IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)) @@ -249,17 +147,7 @@ public Void answer(InvocationOnMock invocation) throws Throwable { RolloverRequest request = (RolloverRequest) invocation.getArguments()[0]; 
@SuppressWarnings("unchecked") ActionListener listener = (ActionListener) invocation.getArguments()[1]; - Set> expectedConditions = new HashSet<>(); - if (step.getMaxAge() != null) { - expectedConditions.add(new MaxAgeCondition(step.getMaxAge())); - } - if (step.getMaxSize() != null) { - expectedConditions.add(new MaxSizeCondition(step.getMaxSize())); - } - if (step.getMaxDocs() != null) { - expectedConditions.add(new MaxDocsCondition(step.getMaxDocs())); - } - assertRolloverIndexRequest(request, alias, expectedConditions); + assertRolloverIndexRequest(request, alias); listener.onFailure(exception); return null; } @@ -267,10 +155,10 @@ public Void answer(InvocationOnMock invocation) throws Throwable { }).when(indicesClient).rolloverIndex(Mockito.any(), Mockito.any()); SetOnce exceptionThrown = new SetOnce<>(); - step.evaluateCondition(indexMetaData, new AsyncWaitStep.Listener() { + step.performAction(indexMetaData, null, new AsyncActionStep.Listener() { @Override - public void onResponse(boolean complete, ToXContentObject obj) { + public void onResponse(boolean complete) { throw new AssertionError("Unexpected method call"); } @@ -296,9 +184,9 @@ public void testPerformActionInvalidNullOrEmptyAlias() { RolloverStep step = createRandomInstance(); SetOnce exceptionThrown = new SetOnce<>(); - step.evaluateCondition(indexMetaData, new AsyncWaitStep.Listener() { + step.performAction(indexMetaData, null, new AsyncActionStep.Listener() { @Override - public void onResponse(boolean complete, ToXContentObject obj) { + public void onResponse(boolean complete) { throw new AssertionError("Unexpected method call"); } @@ -321,9 +209,9 @@ public void testPerformActionAliasDoesNotPointToIndex() { RolloverStep step = createRandomInstance(); SetOnce exceptionThrown = new SetOnce<>(); - step.evaluateCondition(indexMetaData, new AsyncWaitStep.Listener() { + step.performAction(indexMetaData, null, new AsyncActionStep.Listener() { @Override - public void onResponse(boolean complete, ToXContentObject obj) { + public void onResponse(boolean complete) { throw new AssertionError("Unexpected method call"); } @@ -336,6 +224,5 @@ public void onFailure(Exception e) { assertThat(exceptionThrown.get().getMessage(), equalTo(String.format(Locale.ROOT, "%s [%s] does not point to index [%s]", RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias, indexMetaData.getIndex().getName()))); - } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/UpdateRolloverLifecycleDateStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/UpdateRolloverLifecycleDateStepTests.java index 6e492e24f9b33..9db45c1b59b2f 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/UpdateRolloverLifecycleDateStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/UpdateRolloverLifecycleDateStepTests.java @@ -87,8 +87,8 @@ public void testPerformActionBeforeRolloverHappened() { IllegalStateException exceptionThrown = expectThrows(IllegalStateException.class, () -> step.performAction(indexMetaData.getIndex(), clusterState)); assertThat(exceptionThrown.getMessage(), - equalTo("no rollover info found for [" + indexMetaData.getIndex().getName() + "], either the index " + - "has not yet rolled over or a subsequent index was created outside of Index Lifecycle Management")); + equalTo("no rollover info found for [" + indexMetaData.getIndex().getName() + "] with alias [" + alias + "], the index " + + "has not yet rolled 
over with that alias")); } public void testPerformActionWithNoRolloverAliasSetting() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForRolloverReadyStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForRolloverReadyStepTests.java new file mode 100644 index 0000000000000..b6bab1a207a14 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForRolloverReadyStepTests.java @@ -0,0 +1,345 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.rollover.Condition; +import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition; +import org.elasticsearch.action.admin.indices.rollover.MaxDocsCondition; +import org.elasticsearch.action.admin.indices.rollover.MaxSizeCondition; +import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; +import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; +import org.elasticsearch.client.AdminClient; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.cluster.metadata.AliasMetaData; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.junit.Before; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import java.util.HashSet; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.equalTo; + +public class WaitForRolloverReadyStepTests extends AbstractStepTestCase { + + private Client client; + + @Before + public void setup() { + client = Mockito.mock(Client.class); + } + + @Override + protected WaitForRolloverReadyStep createRandomInstance() { + Step.StepKey stepKey = randomStepKey(); + Step.StepKey nextStepKey = randomStepKey(); + ByteSizeUnit maxSizeUnit = randomFrom(ByteSizeUnit.values()); + ByteSizeValue maxSize = randomBoolean() ? null : new ByteSizeValue(randomNonNegativeLong() / maxSizeUnit.toBytes(1), maxSizeUnit); + Long maxDocs = randomBoolean() ? null : randomNonNegativeLong(); + TimeValue maxAge = (maxDocs == null && maxSize == null || randomBoolean()) + ? 
TimeValue.parseTimeValue(randomPositiveTimeValue(), "rollover_action_test") + : null; + return new WaitForRolloverReadyStep(stepKey, nextStepKey, client, maxSize, maxAge, maxDocs); + } + + @Override + protected WaitForRolloverReadyStep mutateInstance(WaitForRolloverReadyStep instance) { + Step.StepKey key = instance.getKey(); + Step.StepKey nextKey = instance.getNextStepKey(); + ByteSizeValue maxSize = instance.getMaxSize(); + TimeValue maxAge = instance.getMaxAge(); + Long maxDocs = instance.getMaxDocs(); + + switch (between(0, 4)) { + case 0: + key = new Step.StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 1: + nextKey = new Step.StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 2: + maxSize = randomValueOtherThan(maxSize, () -> { + ByteSizeUnit maxSizeUnit = randomFrom(ByteSizeUnit.values()); + return new ByteSizeValue(randomNonNegativeLong() / maxSizeUnit.toBytes(1), maxSizeUnit); + }); + break; + case 3: + maxAge = TimeValue.parseTimeValue(randomPositiveTimeValue(), "rollover_action_test"); + break; + case 4: + maxDocs = randomNonNegativeLong(); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return new WaitForRolloverReadyStep(key, nextKey, instance.getClient(), maxSize, maxAge, maxDocs); + } + + @Override + protected WaitForRolloverReadyStep copyInstance(WaitForRolloverReadyStep instance) { + return new WaitForRolloverReadyStep(instance.getKey(), instance.getNextStepKey(), instance.getClient(), + instance.getMaxSize(), instance.getMaxAge(), instance.getMaxDocs()); + } + + private static void assertRolloverIndexRequest(RolloverRequest request, String alias, Set> expectedConditions) { + assertNotNull(request); + assertEquals(1, request.indices().length); + assertEquals(alias, request.indices()[0]); + assertEquals(alias, request.getAlias()); + assertEquals(expectedConditions.size(), request.getConditions().size()); + assertTrue(request.isDryRun()); + Set expectedConditionValues = expectedConditions.stream().map(Condition::value).collect(Collectors.toSet()); + Set actualConditionValues = request.getConditions().values().stream() + .map(Condition::value).collect(Collectors.toSet()); + assertEquals(expectedConditionValues, actualConditionValues); + } + + + public void testEvaluateCondition() { + String alias = randomAlphaOfLength(5); + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)) + .putAlias(AliasMetaData.builder(alias)) + .settings(settings(Version.CURRENT).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + + WaitForRolloverReadyStep step = createRandomInstance(); + + AdminClient adminClient = Mockito.mock(AdminClient.class); + IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class); + + Mockito.when(client.admin()).thenReturn(adminClient); + Mockito.when(adminClient.indices()).thenReturn(indicesClient); + Mockito.doAnswer(new Answer() { + + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + RolloverRequest request = (RolloverRequest) invocation.getArguments()[0]; + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + Set> expectedConditions = new HashSet<>(); + if (step.getMaxAge() != null) { + expectedConditions.add(new MaxAgeCondition(step.getMaxAge())); + } + if (step.getMaxSize() != null) { + expectedConditions.add(new 
MaxSizeCondition(step.getMaxSize())); + } + if (step.getMaxDocs() != null) { + expectedConditions.add(new MaxDocsCondition(step.getMaxDocs())); + } + assertRolloverIndexRequest(request, alias, expectedConditions); + Map conditionResults = expectedConditions.stream() + .collect(Collectors.toMap(Condition::toString, condition -> true)); + listener.onResponse(new RolloverResponse(null, null, conditionResults, request.isDryRun(), false, false, false)); + return null; + } + + }).when(indicesClient).rolloverIndex(Mockito.any(), Mockito.any()); + + SetOnce conditionsMet = new SetOnce<>(); + step.evaluateCondition(indexMetaData, new AsyncWaitStep.Listener() { + + @Override + public void onResponse(boolean complete, ToXContentObject infomationContext) { + conditionsMet.set(complete); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError("Unexpected method call", e); + } + }); + + assertEquals(true, conditionsMet.get()); + + Mockito.verify(client, Mockito.only()).admin(); + Mockito.verify(adminClient, Mockito.only()).indices(); + Mockito.verify(indicesClient, Mockito.only()).rolloverIndex(Mockito.any(), Mockito.any()); + } + + public void testPerformActionNotComplete() { + String alias = randomAlphaOfLength(5); + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)) + .putAlias(AliasMetaData.builder(alias)) + .settings(settings(Version.CURRENT).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + WaitForRolloverReadyStep step = createRandomInstance(); + + AdminClient adminClient = Mockito.mock(AdminClient.class); + IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class); + + Mockito.when(client.admin()).thenReturn(adminClient); + Mockito.when(adminClient.indices()).thenReturn(indicesClient); + Mockito.doAnswer(new Answer() { + + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + RolloverRequest request = (RolloverRequest) invocation.getArguments()[0]; + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + Set> expectedConditions = new HashSet<>(); + if (step.getMaxAge() != null) { + expectedConditions.add(new MaxAgeCondition(step.getMaxAge())); + } + if (step.getMaxSize() != null) { + expectedConditions.add(new MaxSizeCondition(step.getMaxSize())); + } + if (step.getMaxDocs() != null) { + expectedConditions.add(new MaxDocsCondition(step.getMaxDocs())); + } + assertRolloverIndexRequest(request, alias, expectedConditions); + Map conditionResults = expectedConditions.stream() + .collect(Collectors.toMap(Condition::toString, condition -> false)); + listener.onResponse(new RolloverResponse(null, null, conditionResults, request.isDryRun(), false, false, false)); + return null; + } + + }).when(indicesClient).rolloverIndex(Mockito.any(), Mockito.any()); + + SetOnce actionCompleted = new SetOnce<>(); + step.evaluateCondition(indexMetaData, new AsyncWaitStep.Listener() { + + @Override + public void onResponse(boolean complete, ToXContentObject infomationContext) { + actionCompleted.set(complete); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError("Unexpected method call", e); + } + }); + + assertEquals(false, actionCompleted.get()); + + Mockito.verify(client, Mockito.only()).admin(); + Mockito.verify(adminClient, Mockito.only()).indices(); + Mockito.verify(indicesClient, Mockito.only()).rolloverIndex(Mockito.any(), Mockito.any()); 
+ } + + public void testPerformActionFailure() { + String alias = randomAlphaOfLength(5); + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)) + .putAlias(AliasMetaData.builder(alias)) + .settings(settings(Version.CURRENT).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + Exception exception = new RuntimeException(); + WaitForRolloverReadyStep step = createRandomInstance(); + + AdminClient adminClient = Mockito.mock(AdminClient.class); + IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class); + + Mockito.when(client.admin()).thenReturn(adminClient); + Mockito.when(adminClient.indices()).thenReturn(indicesClient); + Mockito.doAnswer(new Answer() { + + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + RolloverRequest request = (RolloverRequest) invocation.getArguments()[0]; + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + Set> expectedConditions = new HashSet<>(); + if (step.getMaxAge() != null) { + expectedConditions.add(new MaxAgeCondition(step.getMaxAge())); + } + if (step.getMaxSize() != null) { + expectedConditions.add(new MaxSizeCondition(step.getMaxSize())); + } + if (step.getMaxDocs() != null) { + expectedConditions.add(new MaxDocsCondition(step.getMaxDocs())); + } + assertRolloverIndexRequest(request, alias, expectedConditions); + listener.onFailure(exception); + return null; + } + + }).when(indicesClient).rolloverIndex(Mockito.any(), Mockito.any()); + + SetOnce exceptionThrown = new SetOnce<>(); + step.evaluateCondition(indexMetaData, new AsyncWaitStep.Listener() { + + @Override + public void onResponse(boolean complete, ToXContentObject infomationContext) { + throw new AssertionError("Unexpected method call"); + } + + @Override + public void onFailure(Exception e) { + assertSame(exception, e); + exceptionThrown.set(true); + } + }); + + assertEquals(true, exceptionThrown.get()); + + Mockito.verify(client, Mockito.only()).admin(); + Mockito.verify(adminClient, Mockito.only()).indices(); + Mockito.verify(indicesClient, Mockito.only()).rolloverIndex(Mockito.any(), Mockito.any()); + } + + public void testPerformActionInvalidNullOrEmptyAlias() { + String alias = randomBoolean() ? 
"" : null; + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)) + .settings(settings(Version.CURRENT).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + WaitForRolloverReadyStep step = createRandomInstance(); + + SetOnce exceptionThrown = new SetOnce<>(); + step.evaluateCondition(indexMetaData, new AsyncWaitStep.Listener() { + @Override + public void onResponse(boolean complete, ToXContentObject infomationContext) { + throw new AssertionError("Unexpected method call"); + } + + @Override + public void onFailure(Exception e) { + exceptionThrown.set(e); + } + }); + assertThat(exceptionThrown.get().getClass(), equalTo(IllegalArgumentException.class)); + assertThat(exceptionThrown.get().getMessage(), equalTo(String.format(Locale.ROOT, + "setting [%s] for index [%s] is empty or not defined", RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, + indexMetaData.getIndex().getName()))); + } + + public void testPerformActionAliasDoesNotPointToIndex() { + String alias = randomAlphaOfLength(5); + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)) + .settings(settings(Version.CURRENT).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + WaitForRolloverReadyStep step = createRandomInstance(); + + SetOnce exceptionThrown = new SetOnce<>(); + step.evaluateCondition(indexMetaData, new AsyncWaitStep.Listener() { + @Override + public void onResponse(boolean complete, ToXContentObject infomationContext) { + throw new AssertionError("Unexpected method call"); + } + + @Override + public void onFailure(Exception e) { + exceptionThrown.set(e); + } + }); + assertThat(exceptionThrown.get().getClass(), equalTo(IllegalArgumentException.class)); + assertThat(exceptionThrown.get().getMessage(), equalTo(String.format(Locale.ROOT, + "%s [%s] does not point to index [%s]", RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias, + indexMetaData.getIndex().getName()))); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java index 36bd2fbcb4689..fe7c5b1a1d104 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java @@ -109,6 +109,9 @@ public static DatafeedConfig createRandomizedDatafeedConfig(String jobId, long b if (randomBoolean()) { builder.setChunkingConfig(ChunkingConfigTests.createRandomizedChunk()); } + if (randomBoolean()) { + builder.setDelayedDataCheckConfig(DelayedDataCheckConfigTests.createRandomizedConfig(bucketSpanMillis)); + } return builder.build(); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java index 592fdbe9de6ef..d501dde4eec41 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java @@ -89,6 +89,9 @@ public static DatafeedUpdate createRandomized(String datafeedId, @Nullable Dataf if (randomBoolean()) { 
builder.setChunkingConfig(ChunkingConfigTests.createRandomizedChunk()); } + if (randomBoolean()) { + builder.setDelayedDataCheckConfig(DelayedDataCheckConfigTests.createRandomizedConfig(randomLongBetween(300_001, 400_000))); + } return builder.build(); } @@ -155,6 +158,7 @@ public void testApply_givenFullUpdateNoAggregations() { update.setScriptFields(Collections.singletonList(new SearchSourceBuilder.ScriptField("a", mockScript("b"), false))); update.setScrollSize(8000); update.setChunkingConfig(ChunkingConfig.newManual(TimeValue.timeValueHours(1))); + update.setDelayedDataCheckConfig(DelayedDataCheckConfig.enabledDelayedDataCheckConfig(TimeValue.timeValueHours(1))); DatafeedConfig updatedDatafeed = update.build().apply(datafeed, Collections.emptyMap()); @@ -169,6 +173,8 @@ public void testApply_givenFullUpdateNoAggregations() { equalTo(Collections.singletonList(new SearchSourceBuilder.ScriptField("a", mockScript("b"), false)))); assertThat(updatedDatafeed.getScrollSize(), equalTo(8000)); assertThat(updatedDatafeed.getChunkingConfig(), equalTo(ChunkingConfig.newManual(TimeValue.timeValueHours(1)))); + assertThat(updatedDatafeed.getDelayedDataCheckConfig().isEnabled(), equalTo(true)); + assertThat(updatedDatafeed.getDelayedDataCheckConfig().getCheckWindow(), equalTo(TimeValue.timeValueHours(1))); } public void testApply_givenAggregations() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DelayedDataCheckConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DelayedDataCheckConfigTests.java new file mode 100644 index 0000000000000..41f991cbd7cf0 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DelayedDataCheckConfigTests.java @@ -0,0 +1,95 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.datafeed; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.core.Is.is; + +public class DelayedDataCheckConfigTests extends AbstractSerializingTestCase { + + @Override + protected DelayedDataCheckConfig createTestInstance(){ + return createRandomizedConfig(100); + } + + @Override + protected Writeable.Reader instanceReader() { + return DelayedDataCheckConfig::new; + } + + @Override + protected DelayedDataCheckConfig doParseInstance(XContentParser parser) { + return DelayedDataCheckConfig.STRICT_PARSER.apply(parser, null); + } + + public void testConstructor() { + expectThrows(IllegalArgumentException.class, () -> new DelayedDataCheckConfig(true, TimeValue.MINUS_ONE)); + expectThrows(IllegalArgumentException.class, () -> new DelayedDataCheckConfig(true, TimeValue.timeValueHours(25))); + } + + public void testEnabledDelayedDataCheckConfig() { + DelayedDataCheckConfig delayedDataCheckConfig = DelayedDataCheckConfig.enabledDelayedDataCheckConfig(TimeValue.timeValueHours(5)); + assertThat(delayedDataCheckConfig.isEnabled(), equalTo(true)); + assertThat(delayedDataCheckConfig.getCheckWindow(), equalTo(TimeValue.timeValueHours(5))); + } + + public void testDisabledDelayedDataCheckConfig() { + DelayedDataCheckConfig delayedDataCheckConfig = DelayedDataCheckConfig.disabledDelayedDataCheckConfig(); + assertThat(delayedDataCheckConfig.isEnabled(), equalTo(false)); + assertThat(delayedDataCheckConfig.getCheckWindow(), equalTo(null)); + } + + public void testDefaultDelayedDataCheckConfig() { + DelayedDataCheckConfig delayedDataCheckConfig = DelayedDataCheckConfig.defaultDelayedDataCheckConfig(); + assertThat(delayedDataCheckConfig.isEnabled(), equalTo(true)); + assertThat(delayedDataCheckConfig.getCheckWindow(), is(nullValue())); + } + + public static DelayedDataCheckConfig createRandomizedConfig(long bucketSpanMillis) { + boolean enabled = randomBoolean(); + TimeValue timeWindow = null; + if (enabled || randomBoolean()) { + // time span is required to be at least 1 millis, so we use a custom method to generate a time value here + timeWindow = new TimeValue(randomLongBetween(bucketSpanMillis,bucketSpanMillis*2)); + } + return new DelayedDataCheckConfig(enabled, timeWindow); + } + + @Override + protected DelayedDataCheckConfig mutateInstance(DelayedDataCheckConfig instance) throws IOException { + boolean enabled = instance.isEnabled(); + TimeValue timeWindow = instance.getCheckWindow(); + switch (between(0, 1)) { + case 0: + enabled = !enabled; + if (randomBoolean()) { + timeWindow = TimeValue.timeValueMillis(randomLongBetween(1, 1000)); + } else { + timeWindow = null; + } + break; + case 1: + if (timeWindow == null) { + timeWindow = TimeValue.timeValueMillis(randomLongBetween(1, 1000)); + } else { + timeWindow = new TimeValue(timeWindow.getMillis() + between(10, 100)); + } + enabled = true; + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return new DelayedDataCheckConfig(enabled, timeWindow); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfigTests.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfigTests.java index 8843a336bde3d..c95403a112d58 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfigTests.java @@ -46,7 +46,7 @@ public static AnalysisConfig.Builder createRandomized() { TimeValue bucketSpan = AnalysisConfig.Builder.DEFAULT_BUCKET_SPAN; if (randomBoolean()) { - bucketSpan = TimeValue.timeValueSeconds(randomIntBetween(1, 1_000_000)); + bucketSpan = TimeValue.timeValueSeconds(randomIntBetween(1, 1_000)); builder.setBucketSpan(bucketSpan); } if (isCategorization) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/DefaultAuthenticationFailureHandlerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/DefaultAuthenticationFailureHandlerTests.java index 15593f0b82ea5..07cdec4434a36 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/DefaultAuthenticationFailureHandlerTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/DefaultAuthenticationFailureHandlerTests.java @@ -35,7 +35,7 @@ public void testAuthenticationRequired() { final String bearerAuthScheme = "Bearer realm=\"" + XPackField.SECURITY + "\""; final DefaultAuthenticationFailureHandler failuerHandler; if (testDefault) { - failuerHandler = new DefaultAuthenticationFailureHandler(); + failuerHandler = new DefaultAuthenticationFailureHandler(Collections.emptyMap()); } else { final Map> failureResponeHeaders = new HashMap<>(); failureResponeHeaders.put("WWW-Authenticate", Arrays.asList(basicAuthScheme, bearerAuthScheme)); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index bdb8c09d48424..6a0261a4abe2e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -11,9 +11,11 @@ import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsAction; import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; import org.elasticsearch.action.admin.cluster.stats.ClusterStatsAction; +import org.elasticsearch.action.admin.indices.close.CloseIndexAction; import org.elasticsearch.action.admin.indices.create.CreateIndexAction; import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; import org.elasticsearch.action.admin.indices.get.GetIndexAction; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; import org.elasticsearch.action.admin.indices.recovery.RecoveryAction; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction; import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateAction; @@ -277,6 +279,18 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(index), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAction.NAME).test(index), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(index), is(false)); + + // Tasks index 
+ final String taskIndex = org.elasticsearch.tasks.TaskResultsService.TASK_INDEX; + // Things that kibana_system *should* be able to do + assertThat(kibanaRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(taskIndex), is(true)); + assertThat(kibanaRole.indices().allowedIndicesMatcher(PutMappingAction.NAME).test(taskIndex), is(true)); + assertThat(kibanaRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(taskIndex), is(true)); + assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAction.NAME).test(taskIndex), is(true)); + // Things that kibana_system *should not* be able to do + assertThat(kibanaRole.indices().allowedIndicesMatcher(DeleteIndexAction.NAME).test(taskIndex), is(false)); + assertThat(kibanaRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(taskIndex), is(false)); + assertThat(kibanaRole.indices().allowedIndicesMatcher(CloseIndexAction.NAME).test(taskIndex), is(false)); } public void testKibanaUserRole() { diff --git a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/ChangePolicyforIndexIT.java b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/ChangePolicyforIndexIT.java index 0589da29312f8..84596c423b339 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/ChangePolicyforIndexIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/ChangePolicyforIndexIT.java @@ -21,9 +21,9 @@ import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; import org.elasticsearch.xpack.core.indexlifecycle.Phase; import org.elasticsearch.xpack.core.indexlifecycle.RolloverAction; -import org.elasticsearch.xpack.core.indexlifecycle.RolloverStep; import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; import org.elasticsearch.xpack.core.indexlifecycle.TerminalPolicyStep; +import org.elasticsearch.xpack.core.indexlifecycle.WaitForRolloverReadyStep; import java.io.IOException; import java.util.HashMap; @@ -52,7 +52,6 @@ public class ChangePolicyforIndexIT extends ESRestTestCase { * settings from the second policy are set ont he index (proving the second * policy was used for the warm phase) */ - @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/35244") public void testChangePolicyForIndex() throws Exception { String indexName = "test-000001"; // create policy_1 and policy_2 @@ -92,7 +91,7 @@ public void testChangePolicyForIndex() throws Exception { ensureGreen(indexName); // Check the index is on the attempt rollover step - assertBusy(() -> assertStep(indexName, new StepKey("hot", RolloverAction.NAME, RolloverStep.NAME))); + assertBusy(() -> assertStep(indexName, new StepKey("hot", RolloverAction.NAME, WaitForRolloverReadyStep.NAME))); // Change the policy to policy_2 Request changePolicyRequest = new Request("PUT", "/" + indexName + "/_settings"); @@ -102,7 +101,7 @@ public void testChangePolicyForIndex() throws Exception { assertOK(client().performRequest(changePolicyRequest)); // Check the index is still on the attempt rollover step - assertBusy(() -> assertStep(indexName, new StepKey("hot", RolloverAction.NAME, RolloverStep.NAME))); + assertBusy(() -> assertStep(indexName, new StepKey("hot", RolloverAction.NAME, WaitForRolloverReadyStep.NAME))); // Index a single document XContentBuilder document = jsonBuilder().startObject(); @@ -131,11 +130,6 @@ public void testChangePolicyForIndex() throws Exception { assertEquals("node-1,node-2", includesAllocation); } - public
void testTempAwaitFix() { - // this is a test stub since there is only one test in this class and it is - // awaits-fixed. This test is to be removed once testChangePolicyForIndex is resolved - } - private void assertStep(String indexName, StepKey expectedStep) throws IOException { Response explainResponse = client().performRequest(new Request("GET", "/" + indexName + "/_ilm/explain")); assertOK(explainResponse); diff --git a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java index 82fe66c0e2d53..adf6ab8972bab 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java @@ -33,6 +33,7 @@ import org.elasticsearch.xpack.core.indexlifecycle.ShrinkStep; import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; import org.elasticsearch.xpack.core.indexlifecycle.TerminalPolicyStep; +import org.elasticsearch.xpack.core.indexlifecycle.WaitForRolloverReadyStep; import org.junit.Before; import java.io.IOException; @@ -160,7 +161,7 @@ public void testMoveToRolloverStep() throws Exception { " \"next_step\": {\n" + " \"phase\": \"hot\",\n" + " \"action\": \"rollover\",\n" + - " \"name\": \"attempt_rollover\"\n" + + " \"name\": \"attempt-rollover\"\n" + " }\n" + "}"); client().performRequest(moveToStepRequest); @@ -237,12 +238,10 @@ public void testRolloverAlreadyExists() throws Exception { createIndexWithSettings(originalIndex, Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) .put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, "alias")); - // create policy createNewSingletonPolicy("hot", new RolloverAction(null, null, 1L)); // update policy on index updatePolicy(originalIndex, policy); - // Manually create the new index Request request = new Request("PUT", "/" + secondIndex); request.setJsonEntity("{\n \"settings\": " + Strings.toString(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) @@ -250,17 +249,14 @@ public void testRolloverAlreadyExists() throws Exception { client().performRequest(request); // wait for the shards to initialize ensureGreen(secondIndex); - // index another doc to trigger the policy index(client(), originalIndex, "_id", "foo", "bar"); - assertBusy(() -> { logger.info(originalIndex + ": " + getStepKeyForIndex(originalIndex)); logger.info(secondIndex + ": " + getStepKeyForIndex(secondIndex)); assertThat(getStepKeyForIndex(originalIndex), equalTo(new StepKey("hot", RolloverAction.NAME, ErrorStep.NAME))); - assertThat(getFailedStepForIndex(originalIndex), equalTo("update-rollover-lifecycle-date")); - assertThat(getReasonForIndex(originalIndex), equalTo("no rollover info found for [" + originalIndex + "], either the index " + - "has not yet rolled over or a subsequent index was created outside of Index Lifecycle Management")); + assertThat(getFailedStepForIndex(originalIndex), equalTo(WaitForRolloverReadyStep.NAME)); + assertThat(getReasonForIndex(originalIndex), containsString("already exists")); }); } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunner.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunner.java index 
2f7947bb51750..ef22d2c84010f 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunner.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunner.java @@ -42,13 +42,17 @@ import org.elasticsearch.xpack.core.indexlifecycle.TerminalPolicyStep; import java.io.IOException; +import java.util.Collections; import java.util.List; import java.util.function.LongSupplier; +import static org.elasticsearch.ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE; import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY; public class IndexLifecycleRunner { private static final Logger logger = LogManager.getLogger(IndexLifecycleRunner.class); + private static final ToXContent.Params STACKTRACE_PARAMS = + new ToXContent.MapParams(Collections.singletonMap(REST_EXCEPTION_SKIP_STACK_TRACE, "false")); private PolicyStepsRegistry stepRegistry; private ClusterService clusterService; private LongSupplier nowSupplier; @@ -323,7 +327,7 @@ static ClusterState moveClusterStateToErrorStep(Index index, ClusterState cluste .get(LifecycleSettings.LIFECYCLE_NAME_SETTING.get(idxMeta.getSettings())); XContentBuilder causeXContentBuilder = JsonXContent.contentBuilder(); causeXContentBuilder.startObject(); - ElasticsearchException.generateThrowableXContent(causeXContentBuilder, ToXContent.EMPTY_PARAMS, cause); + ElasticsearchException.generateThrowableXContent(causeXContentBuilder, STACKTRACE_PARAMS, cause); causeXContentBuilder.endObject(); LifecycleExecutionState nextStepState = moveExecutionStateToNextStep(policyMetadata, LifecycleExecutionState.fromIndexMetadata(idxMeta), currentStep, new StepKey(currentStep.getPhase(), diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/ExecuteStepsUpdateTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/ExecuteStepsUpdateTaskTests.java index 21f2b0e70939f..4611618b2cd24 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/ExecuteStepsUpdateTaskTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/ExecuteStepsUpdateTaskTests.java @@ -48,6 +48,7 @@ import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY; import static org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyTestsUtils.newTestLifecyclePolicy; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.sameInstance; @@ -270,7 +271,8 @@ public void testClusterActionStepThrowsException() throws IOException { assertThat(task.getNextStepKey(), equalTo(secondStep.getKey())); assertThat(lifecycleState.getPhaseTime(), nullValue()); assertThat(lifecycleState.getActionTime(), nullValue()); - assertThat(lifecycleState.getStepInfo(), equalTo("{\"type\":\"runtime_exception\",\"reason\":\"error\"}")); + assertThat(lifecycleState.getStepInfo(), + containsString("{\"type\":\"runtime_exception\",\"reason\":\"error\",\"stack_trace\":\"")); } public void testClusterWaitStepThrowsException() throws IOException { @@ -289,7 +291,8 @@ public void testClusterWaitStepThrowsException() throws IOException { assertThat(task.getNextStepKey(), equalTo(thirdStepKey)); assertThat(lifecycleState.getPhaseTime(), nullValue()); assertThat(lifecycleState.getActionTime(), nullValue()); - 
assertThat(lifecycleState.getStepInfo(), equalTo("{\"type\":\"runtime_exception\",\"reason\":\"error\"}")); + assertThat(lifecycleState.getStepInfo(), + containsString("{\"type\":\"runtime_exception\",\"reason\":\"error\",\"stack_trace\":\"")); } private void setStateToKey(StepKey stepKey) throws IOException { diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunnerTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunnerTests.java index 8117207f2be84..63ee9349043a8 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunnerTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunnerTests.java @@ -73,6 +73,7 @@ import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY; import static org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyTestsUtils.newTestLifecyclePolicy; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.mockito.Mockito.mock; @@ -853,12 +854,12 @@ public void testMoveClusterStateToErrorStep() throws IOException { ClusterState newClusterState = IndexLifecycleRunner.moveClusterStateToErrorStep(index, clusterState, currentStep, cause, () -> now); assertClusterStateOnErrorStep(clusterState, index, currentStep, newClusterState, now, - "{\"type\":\"exception\",\"reason\":\"THIS IS AN EXPECTED CAUSE\"}"); + "{\"type\":\"exception\",\"reason\":\"THIS IS AN EXPECTED CAUSE\""); cause = new IllegalArgumentException("non elasticsearch-exception"); newClusterState = IndexLifecycleRunner.moveClusterStateToErrorStep(index, clusterState, currentStep, cause, () -> now); assertClusterStateOnErrorStep(clusterState, index, currentStep, newClusterState, now, - "{\"type\":\"illegal_argument_exception\",\"reason\":\"non elasticsearch-exception\"}"); + "{\"type\":\"illegal_argument_exception\",\"reason\":\"non elasticsearch-exception\",\"stack_trace\":\""); } public void testMoveClusterStateToFailedStep() { @@ -1267,7 +1268,7 @@ private void assertClusterStateOnErrorStep(ClusterState oldClusterState, Index i assertEquals(currentStep.getAction(), newLifecycleState.getAction()); assertEquals(ErrorStep.NAME, newLifecycleState.getStep()); assertEquals(currentStep.getName(), newLifecycleState.getFailedStep()); - assertEquals(expectedCauseValue, newLifecycleState.getStepInfo()); + assertThat(newLifecycleState.getStepInfo(), containsString(expectedCauseValue)); assertEquals(oldLifecycleState.getPhaseTime(), newLifecycleState.getPhaseTime()); assertEquals(oldLifecycleState.getActionTime(), newLifecycleState.getActionTime()); assertEquals(now, newLifecycleState.getStepTime().longValue()); diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/MoveToErrorStepUpdateTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/MoveToErrorStepUpdateTaskTests.java index dc3a6602f39ba..3ad3f27ebdf98 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/MoveToErrorStepUpdateTaskTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/MoveToErrorStepUpdateTaskTests.java @@ -33,6 +33,7 @@ import java.util.Collections; import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY; +import static org.hamcrest.Matchers.containsString; import static 
org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.sameInstance; @@ -86,7 +87,8 @@ public void testExecuteSuccessfullyMoved() throws IOException { ElasticsearchException.generateThrowableXContent(causeXContentBuilder, ToXContent.EMPTY_PARAMS, cause); causeXContentBuilder.endObject(); String expectedCauseValue = BytesReference.bytes(causeXContentBuilder).utf8ToString(); - assertThat(lifecycleState.getStepInfo(), equalTo(expectedCauseValue)); + assertThat(lifecycleState.getStepInfo(), + containsString("{\"type\":\"exception\",\"reason\":\"THIS IS AN EXPECTED CAUSE\",\"stack_trace\":\"")); } public void testExecuteNoopDifferentStep() throws IOException { diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DelayedDataDetectorIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DelayedDataDetectorIT.java index c672fadde7812..82b35da006d23 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DelayedDataDetectorIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DelayedDataDetectorIT.java @@ -20,22 +20,24 @@ import org.elasticsearch.xpack.core.ml.action.GetBucketsAction; import org.elasticsearch.xpack.core.ml.action.util.PageParams; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.datafeed.DelayedDataCheckConfig; import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; import org.elasticsearch.xpack.core.ml.job.config.DataDescription; import org.elasticsearch.xpack.core.ml.job.config.Detector; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.results.Bucket; import org.elasticsearch.xpack.core.ml.job.results.Result; -import org.elasticsearch.xpack.ml.datafeed.DelayedDataDetector; -import org.elasticsearch.xpack.ml.datafeed.DelayedDataDetector.BucketWithMissingData; +import org.elasticsearch.xpack.ml.datafeed.delayeddatacheck.DelayedDataDetector; +import org.elasticsearch.xpack.ml.datafeed.delayeddatacheck.DelayedDataDetectorFactory; +import org.elasticsearch.xpack.ml.datafeed.delayeddatacheck.DelayedDataDetectorFactory.BucketWithMissingData; import org.junit.After; import org.junit.Before; import java.util.Collections; import java.util.Date; import java.util.List; +import java.util.stream.Collectors; -import static org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase.createDatafeed; import static org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase.createDatafeedBuilder; import static org.hamcrest.Matchers.equalTo; @@ -64,7 +66,10 @@ public void testMissingDataDetection() throws Exception { final String jobId = "delayed-data-detection-job"; Job.Builder job = createJob(jobId, TimeValue.timeValueMinutes(5), "count", null); - DatafeedConfig datafeedConfig = createDatafeed(job.getId() + "-datafeed", job.getId(), Collections.singletonList(index)); + DatafeedConfig.Builder datafeedConfigBuilder = + createDatafeedBuilder(job.getId() + "-datafeed", job.getId(), Collections.singletonList(index)); + datafeedConfigBuilder.setDelayedDataCheckConfig(DelayedDataCheckConfig.enabledDelayedDataCheckConfig(TimeValue.timeValueHours(12))); + DatafeedConfig datafeedConfig = datafeedConfigBuilder.build(); registerJob(job); putJob(job); openJob(job.getId()); @@ -77,26 +82,32 @@ public void testMissingDataDetection() 
throws Exception { // Get the latest finalized bucket Bucket lastBucket = getLatestFinalizedBucket(jobId); - DelayedDataDetector delayedDataDetector = - new DelayedDataDetector(job.build(new Date()), datafeedConfig, TimeValue.timeValueHours(12), client()); + DelayedDataDetector delayedDataDetector = newDetector(job.build(new Date()), datafeedConfig); List response = delayedDataDetector.detectMissingData(lastBucket.getEpoch()*1000); assertThat(response.stream().mapToLong(BucketWithMissingData::getMissingDocumentCount).sum(), equalTo(0L)); long missingDocs = randomIntBetween(32, 128); // Simply adding data within the current delayed data detection, the choice of 43100000 is arbitrary and within the window - // for the DelayedDataDetector + // for the DatafeedDelayedDataDetector writeData(logger, index, missingDocs, now - 43100000, lastBucket.getEpoch()*1000); response = delayedDataDetector.detectMissingData(lastBucket.getEpoch()*1000); assertThat(response.stream().mapToLong(BucketWithMissingData::getMissingDocumentCount).sum(), equalTo(missingDocs)); + // Assert that the buckets are returned in order + List timeStamps = response.stream().map(BucketWithMissingData::getTimeStamp).collect(Collectors.toList()); + assertEquals(timeStamps.stream().sorted().collect(Collectors.toList()), timeStamps); } public void testMissingDataDetectionInSpecificBucket() throws Exception { final String jobId = "delayed-data-detection-job-missing-test-specific-bucket"; Job.Builder job = createJob(jobId, TimeValue.timeValueMinutes(5), "count", null); - DatafeedConfig datafeedConfig = createDatafeed(job.getId() + "-datafeed", job.getId(), Collections.singletonList(index)); + DatafeedConfig.Builder datafeedConfigBuilder = + createDatafeedBuilder(job.getId() + "-datafeed", job.getId(), Collections.singletonList(index)); + datafeedConfigBuilder.setDelayedDataCheckConfig(DelayedDataCheckConfig.enabledDelayedDataCheckConfig(TimeValue.timeValueHours(12))); + DatafeedConfig datafeedConfig = datafeedConfigBuilder.build(); + registerJob(job); putJob(job); openJob(job.getId()); @@ -110,8 +121,7 @@ public void testMissingDataDetectionInSpecificBucket() throws Exception { // Get the latest finalized bucket Bucket lastBucket = getLatestFinalizedBucket(jobId); - DelayedDataDetector delayedDataDetector = - new DelayedDataDetector(job.build(new Date()), datafeedConfig, TimeValue.timeValueHours(12), client()); + DelayedDataDetector delayedDataDetector = newDetector(job.build(new Date()), datafeedConfig); long missingDocs = randomIntBetween(1, 10); @@ -127,6 +137,10 @@ public void testMissingDataDetectionInSpecificBucket() throws Exception { } } assertThat(hasBucketWithMissing, equalTo(true)); + + // Assert that the buckets are returned in order + List timeStamps = response.stream().map(BucketWithMissingData::getTimeStamp).collect(Collectors.toList()); + assertEquals(timeStamps.stream().sorted().collect(Collectors.toList()), timeStamps); } public void testMissingDataDetectionWithAggregationsAndQuery() throws Exception { @@ -147,6 +161,8 @@ public void testMissingDataDetectionWithAggregationsAndQuery() throws Exception .interval(TimeValue.timeValueMinutes(5).millis()))); datafeedConfigBuilder.setQuery(new RangeQueryBuilder("value").gte(numDocs/2)); datafeedConfigBuilder.setFrequency(TimeValue.timeValueMinutes(5)); + datafeedConfigBuilder.setDelayedDataCheckConfig(DelayedDataCheckConfig.enabledDelayedDataCheckConfig(TimeValue.timeValueHours(12))); + DatafeedConfig datafeedConfig = datafeedConfigBuilder.build(); registerJob(job); putJob(job); @@ -160,19 
+176,21 @@ public void testMissingDataDetectionWithAggregationsAndQuery() throws Exception // Get the latest finalized bucket Bucket lastBucket = getLatestFinalizedBucket(jobId); - DelayedDataDetector delayedDataDetector = - new DelayedDataDetector(job.build(new Date()), datafeedConfig, TimeValue.timeValueHours(12), client()); + DelayedDataDetector delayedDataDetector = newDetector(job.build(new Date()), datafeedConfig); List response = delayedDataDetector.detectMissingData(lastBucket.getEpoch()*1000); assertThat(response.stream().mapToLong(BucketWithMissingData::getMissingDocumentCount).sum(), equalTo(0L)); long missingDocs = numDocs; // Simply adding data within the current delayed data detection, the choice of 43100000 is arbitrary and within the window - // for the DelayedDataDetector + // for the DatafeedDelayedDataDetector writeData(logger, index, missingDocs, now - 43100000, lastBucket.getEpoch()*1000); response = delayedDataDetector.detectMissingData(lastBucket.getEpoch()*1000); assertThat(response.stream().mapToLong(BucketWithMissingData::getMissingDocumentCount).sum(), equalTo((missingDocs+1)/2)); + // Assert that the buckets are returned in order + List timeStamps = response.stream().map(BucketWithMissingData::getTimeStamp).collect(Collectors.toList()); + assertEquals(timeStamps.stream().sorted().collect(Collectors.toList()), timeStamps); } private Job.Builder createJob(String id, TimeValue bucketSpan, String function, String field) { @@ -231,4 +249,8 @@ private Bucket getLatestFinalizedBucket(String jobId) { getBucketsRequest.setPageParams(new PageParams(0, 1)); return getBuckets(getBucketsRequest).get(0); } + + private DelayedDataDetector newDetector(Job job, DatafeedConfig datafeedConfig) { + return DelayedDataDetectorFactory.buildDetector(job, datafeedConfig, client()); + } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java index a3128f507d09e..ea276e8471aa4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java @@ -675,6 +675,8 @@ private void addDocMappingIfMissing(String alias, CheckedSupplier { + private static final Logger logger = LogManager.getLogger(OpenJobPersistentTasksExecutor.class); + private final AutodetectProcessManager autodetectProcessManager; /** diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java index 54a79ee199ee1..952e1c1f27e5a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.XContentElasticsearchExtension; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.core.internal.io.Streams; import org.elasticsearch.index.mapper.DateFieldMapper; @@ -23,12 +24,16 @@ import org.elasticsearch.xpack.core.ml.job.config.DataDescription; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import 
org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; +import org.elasticsearch.xpack.ml.datafeed.delayeddatacheck.DelayedDataDetector; +import org.elasticsearch.xpack.ml.datafeed.delayeddatacheck.DelayedDataDetectorFactory.BucketWithMissingData; import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractorFactory; import org.elasticsearch.xpack.ml.notifications.Auditor; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; +import java.util.Date; +import java.util.List; import java.util.Objects; import java.util.Optional; import java.util.concurrent.atomic.AtomicBoolean; @@ -41,6 +46,7 @@ class DatafeedJob { private static final Logger LOGGER = LogManager.getLogger(DatafeedJob.class); private static final int NEXT_TASK_DELAY_MS = 100; + static final long MISSING_DATA_CHECK_INTERVAL_MS = 900_000; //15 minutes in ms private final Auditor auditor; private final String jobId; @@ -50,15 +56,19 @@ class DatafeedJob { private final Client client; private final DataExtractorFactory dataExtractorFactory; private final Supplier currentTimeSupplier; + private final DelayedDataDetector delayedDataDetector; private volatile long lookbackStartTimeMs; + private volatile long latestFinalBucketEndTimeMs; + private volatile long lastDataCheckTimeMs; + private volatile int lastDataCheckAudit; private volatile Long lastEndTimeMs; private AtomicBoolean running = new AtomicBoolean(true); private volatile boolean isIsolated; DatafeedJob(String jobId, DataDescription dataDescription, long frequencyMs, long queryDelayMs, - DataExtractorFactory dataExtractorFactory, Client client, Auditor auditor, Supplier currentTimeSupplier, - long latestFinalBucketEndTimeMs, long latestRecordTimeMs) { + DataExtractorFactory dataExtractorFactory, Client client, Auditor auditor, Supplier currentTimeSupplier, + DelayedDataDetector delayedDataDetector, long latestFinalBucketEndTimeMs, long latestRecordTimeMs) { this.jobId = jobId; this.dataDescription = Objects.requireNonNull(dataDescription); this.frequencyMs = frequencyMs; @@ -67,7 +77,8 @@ class DatafeedJob { this.client = client; this.auditor = auditor; this.currentTimeSupplier = currentTimeSupplier; - + this.delayedDataDetector = delayedDataDetector; + this.latestFinalBucketEndTimeMs = latestFinalBucketEndTimeMs; long lastEndTime = Math.max(latestFinalBucketEndTimeMs, latestRecordTimeMs); if (lastEndTime > 0) { lastEndTimeMs = lastEndTime; @@ -151,9 +162,49 @@ long runRealtime() throws Exception { request.setCalcInterim(true); request.setAdvanceTime(String.valueOf(end)); run(start, end, request); + checkForMissingDataIfNecessary(); return nextRealtimeTimestamp(); } + private void checkForMissingDataIfNecessary() { + if (isRunning() && !isIsolated && checkForMissingDataTriggered()) { + + // Keep track of the last bucket time for which we did a missing data check + this.lastDataCheckTimeMs = this.currentTimeSupplier.get(); + List missingDataBuckets = delayedDataDetector.detectMissingData(latestFinalBucketEndTimeMs); + if (missingDataBuckets.isEmpty() == false) { + + long totalRecordsMissing = missingDataBuckets.stream() + .mapToLong(BucketWithMissingData::getMissingDocumentCount) + .sum(); + // The response is sorted by asc timestamp, so the last entry is the last bucket + Date lastBucketDate = missingDataBuckets.get(missingDataBuckets.size() - 1).getBucket().getTimestamp(); + int newAudit = Objects.hash(totalRecordsMissing, lastBucketDate); + if (newAudit != lastDataCheckAudit) { + auditor.warning(jobId, + 
Messages.getMessage(Messages.JOB_AUDIT_DATAFEED_MISSING_DATA, totalRecordsMissing, + XContentElasticsearchExtension.DEFAULT_DATE_PRINTER.print(lastBucketDate.getTime()))); + lastDataCheckAudit = newAudit; + } + } + } + } + + /** + * We wait a static interval of 15 minutes until the next missing data check. + * + * However, if our delayed data window is smaller than that, we will probably want to check at every available window (if freq. allows). + * This helps us miss as few buckets in the delayed data check as possible. + * + * If our frequency/query delay are longer than our default interval or window size, we will end up looking for missing data on + * every real-time trigger. This should be OK as we are pulling from the index at such a slow pace that another query will + * probably not even be noticeable at such a large timescale. + */ + private boolean checkForMissingDataTriggered() { + return this.currentTimeSupplier.get() > this.lastDataCheckTimeMs + + Math.min(MISSING_DATA_CHECK_INTERVAL_MS, delayedDataDetector.getWindow()); + } + /** * Stops the datafeed job * @@ -260,7 +311,10 @@ private void run(long start, long end, FlushJobAction.Request flushRequest) thro // we call flush the job is closed. Thus, we don't flush unless the // datafeed is still running. if (isRunning() && !isIsolated) { - flushJob(flushRequest); + Date lastFinalizedBucketEnd = flushJob(flushRequest).getLastFinalizedBucketEnd(); + if (lastFinalizedBucketEnd != null) { + this.latestFinalBucketEndTimeMs = lastFinalizedBucketEnd.getTime(); + } } if (recordCount == 0) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilder.java index efe332346efec..22d7bec2da249 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilder.java @@ -13,6 +13,8 @@ import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.job.config.DataDescription; import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.ml.datafeed.delayeddatacheck.DelayedDataDetector; +import org.elasticsearch.xpack.ml.datafeed.delayeddatacheck.DelayedDataDetectorFactory; import org.elasticsearch.xpack.ml.job.persistence.BucketsQueryBuilder; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; import org.elasticsearch.xpack.core.ml.job.results.Bucket; @@ -46,8 +48,9 @@ void build(Job job, DatafeedConfig datafeed, ActionListener listene Consumer contextHanlder = context -> { TimeValue frequency = getFrequencyOrDefault(datafeed, job); TimeValue queryDelay = datafeed.getQueryDelay(); + DelayedDataDetector delayedDataDetector = DelayedDataDetectorFactory.buildDetector(job, datafeed, client); DatafeedJob datafeedJob = new DatafeedJob(job.getId(), buildDataDescription(job), frequency.millis(), queryDelay.millis(), - context.dataExtractorFactory, client, auditor, currentTimeSupplier, + context.dataExtractorFactory, client, auditor, currentTimeSupplier, delayedDataDetector, context.latestFinalBucketEndMs, context.latestRecordTimeMs); listener.onResponse(datafeedJob); }; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DelayedDataDetector.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DatafeedDelayedDataDetector.java similarity 
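The Javadoc above compresses to a simple throttle: a check fires only once min(MISSING_DATA_CHECK_INTERVAL_MS, window) has elapsed since the previous one, so a window narrower than 15 minutes is checked once per window instead. A hypothetical stand-alone rendering of that rule (the real code keeps lastDataCheckTimeMs on the DatafeedJob itself and updates it in checkForMissingDataIfNecessary()):

```java
public class MissingDataCheckThrottle {
    static final long MISSING_DATA_CHECK_INTERVAL_MS = 900_000; // 15 minutes, as in DatafeedJob

    private final long windowMs;       // what delayedDataDetector.getWindow() would return
    private long lastDataCheckTimeMs;  // updated whenever a check actually runs

    MissingDataCheckThrottle(long windowMs) {
        this.windowMs = windowMs;
    }

    boolean shouldCheckNow(long nowMs) {
        // Next check only after the smaller of the static interval and the window has elapsed
        if (nowMs > lastDataCheckTimeMs + Math.min(MISSING_DATA_CHECK_INTERVAL_MS, windowMs)) {
            lastDataCheckTimeMs = nowMs;
            return true;
        }
        return false;
    }
}
```

With a 2h window the effective cadence is 15 minutes; with a 5-minute window it drops to 5 minutes, matching the "check at every available window" case described above.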
index 72% rename from x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DelayedDataDetector.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DatafeedDelayedDataDetector.java index 3c7c6ff963e07..86fe439ac16cb 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DelayedDataDetector.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DatafeedDelayedDataDetector.java @@ -3,26 +3,26 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.ml.datafeed; +package org.elasticsearch.xpack.ml.datafeed.delayeddatacheck; import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.Client; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.xpack.core.ml.action.GetBucketsAction; import org.elasticsearch.xpack.core.ml.action.util.PageParams; -import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.extractor.ExtractorUtils; -import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.ml.datafeed.delayeddatacheck.DelayedDataDetectorFactory.BucketWithMissingData; import org.elasticsearch.xpack.core.ml.job.results.Bucket; import org.elasticsearch.xpack.core.ml.utils.Intervals; import org.joda.time.DateTime; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -35,32 +35,33 @@ /** * This class will search the buckets and indices over a given window to determine if any data is missing */ -public class DelayedDataDetector { +public class DatafeedDelayedDataDetector implements DelayedDataDetector { private static final String DATE_BUCKETS = "date_buckets"; + private final long bucketSpan; private final long window; - private final DatafeedConfig datafeedConfig; private final Client client; - private final Job job; - - public DelayedDataDetector(Job job, DatafeedConfig datafeedConfig, TimeValue window, Client client) { - this.job = job; - this.bucketSpan = job.getAnalysisConfig().getBucketSpan().millis(); - this.datafeedConfig = datafeedConfig; - long windowMillis = window.millis(); - if (windowMillis < bucketSpan) { - throw new IllegalArgumentException("[window] must be greater or equal to the [bucket_span]"); - } - if (Intervals.alignToFloor(windowMillis/bucketSpan, bucketSpan) >= 10000) { - throw new IllegalArgumentException("[window] must contain less than 10000 buckets at the current [bucket_span]"); - } - this.window = windowMillis; + private final String timeField; + private final String jobId; + private final QueryBuilder datafeedQuery; + private final String[] datafeedIndices; + + DatafeedDelayedDataDetector(long bucketSpan, long window, String jobId, String timeField, QueryBuilder datafeedQuery, + String[] datafeedIndices, Client client) { + this.bucketSpan = bucketSpan; + this.window = window; + this.jobId = 
jobId; + this.timeField = timeField; + this.datafeedQuery = datafeedQuery; + this.datafeedIndices = datafeedIndices; this.client = client; } /** - * This method looks at the {@link DatafeedConfig} from {@code latestFinalizedBucket - window} to {@code latestFinalizedBucket}. + * This method looks at the {@link DatafeedDelayedDataDetector#datafeedIndices} + * from {@code latestFinalizedBucket - window} to {@code latestFinalizedBucket} and compares the document counts with the + * {@link DatafeedDelayedDataDetector#jobId}'s finalized buckets' event counts. * * It is done synchronously, and can block for a considerable amount of time, it should only be executed within the appropriate * thread pool. @@ -68,9 +69,15 @@ public DelayedDataDetector(Job job, DatafeedConfig datafeedConfig, TimeValue win * @param latestFinalizedBucketMs The latest finalized bucket timestamp in milliseconds, signifies the end of the time window check * @return A List of {@link BucketWithMissingData} objects that contain each bucket with the current number of missing docs */ + @Override public List detectMissingData(long latestFinalizedBucketMs) { final long end = Intervals.alignToFloor(latestFinalizedBucketMs, bucketSpan); final long start = Intervals.alignToFloor(latestFinalizedBucketMs - window, bucketSpan); + + if (end <= start) { + return Collections.emptyList(); + } + List finalizedBuckets = checkBucketEvents(start, end); Map indexedData = checkCurrentBucketEventCount(start, end); return finalizedBuckets.stream() @@ -81,10 +88,17 @@ public List detectMissingData(long latestFinalizedBucketM .collect(Collectors.toList()); } + @Override + public long getWindow() { + return window; + } + private List checkBucketEvents(long start, long end) { - GetBucketsAction.Request request = new GetBucketsAction.Request(job.getId()); + GetBucketsAction.Request request = new GetBucketsAction.Request(jobId); request.setStart(Long.toString(start)); request.setEnd(Long.toString(end)); + request.setSort("timestamp"); + request.setDescending(false); request.setExcludeInterim(true); request.setPageParams(new PageParams(0, (int)((end - start)/bucketSpan))); @@ -95,13 +109,12 @@ private List checkBucketEvents(long start, long end) { } private Map checkCurrentBucketEventCount(long start, long end) { - String timeField = job.getDataDescription().getTimeField(); SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder() .size(0) .aggregation(new DateHistogramAggregationBuilder(DATE_BUCKETS).interval(bucketSpan).field(timeField)) - .query(ExtractorUtils.wrapInTimeRangeQuery(datafeedConfig.getQuery(), timeField, start, end)); + .query(ExtractorUtils.wrapInTimeRangeQuery(datafeedQuery, timeField, start, end)); - SearchRequest searchRequest = new SearchRequest(datafeedConfig.getIndices().toArray(new String[0])).source(searchSourceBuilder); + SearchRequest searchRequest = new SearchRequest(datafeedIndices).source(searchSourceBuilder); try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN)) { SearchResponse response = client.execute(SearchAction.INSTANCE, searchRequest).actionGet(); List buckets = ((Histogram)response.getAggregations().get(DATE_BUCKETS)).getBuckets(); @@ -132,27 +145,4 @@ private static long toHistogramKeyToEpoch(Object key) { private static long calculateMissing(Map indexedData, Bucket bucket) { return indexedData.getOrDefault(bucket.getEpoch() * 1000, 0L) - bucket.getEventCount(); } - - public static class BucketWithMissingData { - - private final long 
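Stripped of the search and get-buckets plumbing, the comparison detectMissingData() performs reduces to: align the [latestFinalizedBucket - window, latestFinalizedBucket) range to bucket boundaries, then for each finalized bucket subtract the job's event count from the documents actually indexed. A hypothetical stand-alone rendering with the types simplified to maps (the real code uses Bucket objects and a date-histogram aggregation):

```java
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class DelayedDataMathSketch {

    // Same idea as Intervals.alignToFloor for non-negative timestamps:
    // snap a timestamp down to the nearest bucket boundary.
    static long alignToFloor(long value, long bucketSpanMs) {
        return (value / bucketSpanMs) * bucketSpanMs;
    }

    // finalizedEventCounts: bucket epoch ms -> events the job processed for that bucket
    // indexedCounts:        bucket epoch ms -> documents actually present in the indices
    static List<long[]> detectMissingData(Map<Long, Long> finalizedEventCounts,
                                          Map<Long, Long> indexedCounts,
                                          long latestFinalizedBucketMs, long windowMs, long bucketSpanMs) {
        long end = alignToFloor(latestFinalizedBucketMs, bucketSpanMs);
        long start = alignToFloor(latestFinalizedBucketMs - windowMs, bucketSpanMs);
        List<long[]> missingPerBucket = new ArrayList<>(); // each entry: {bucketEpochMs, missingDocCount}
        if (end <= start) {
            return missingPerBucket; // mirrors the early-out added in this change
        }
        for (long t = start; t < end; t += bucketSpanMs) {
            // A positive difference means data arrived after the bucket was finalized
            long missing = indexedCounts.getOrDefault(t, 0L) - finalizedEventCounts.getOrDefault(t, 0L);
            if (missing > 0) {
                missingPerBucket.add(new long[] { t, missing });
            }
        }
        return missingPerBucket;
    }
}
```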
missingDocumentCount; - private final Bucket bucket; - - static BucketWithMissingData fromMissingAndBucket(long missingDocumentCount, Bucket bucket) { - return new BucketWithMissingData(missingDocumentCount, bucket); - } - - private BucketWithMissingData(long missingDocumentCount, Bucket bucket) { - this.missingDocumentCount = missingDocumentCount; - this.bucket = bucket; - } - - public Bucket getBucket() { - return bucket; - } - - public long getMissingDocumentCount() { - return missingDocumentCount; - } - } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DelayedDataDetector.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DelayedDataDetector.java new file mode 100644 index 0000000000000..3d36f3576fd26 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DelayedDataDetector.java @@ -0,0 +1,14 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.datafeed.delayeddatacheck; + +import java.util.List; + +public interface DelayedDataDetector { + List detectMissingData(long endingTimeStamp); + + long getWindow(); +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DelayedDataDetectorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DelayedDataDetectorFactory.java new file mode 100644 index 0000000000000..a9aeb398141d7 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DelayedDataDetectorFactory.java @@ -0,0 +1,125 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.datafeed.delayeddatacheck; + +import org.elasticsearch.client.Client; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.datafeed.DelayedDataCheckConfig; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.job.results.Bucket; + +import java.util.Objects; + +/** + * Builds the appropriate {@link DelayedDataDetector} implementation, with the appropriate settings, given the parameters. + */ +public class DelayedDataDetectorFactory { + + // There are eight 15min buckets in a two hour span, so matching that number as the fallback for very long buckets + private static final int FALLBACK_NUMBER_OF_BUCKETS_TO_SPAN = 8; + private static final TimeValue DEFAULT_CHECK_WINDOW = TimeValue.timeValueHours(2); + + /** + * This will build the appropriate detector given the parameters. + * + * If {@link DatafeedConfig#getDelayedDataCheckConfig()} is not enabled, then a {@link NullDelayedDataDetector} is returned, which + * does not do any checks, and only supplies an empty collection. 
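Callers never pick an implementation themselves; DatafeedJobBuilder above and the IT's newDetector() helper both go through the factory. A short usage sketch (the wrapper class is hypothetical; the factory and detector calls are the ones introduced in this change):

```java
import java.util.List;

import org.elasticsearch.client.Client;
import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;
import org.elasticsearch.xpack.core.ml.job.config.Job;
import org.elasticsearch.xpack.ml.datafeed.delayeddatacheck.DelayedDataDetector;
import org.elasticsearch.xpack.ml.datafeed.delayeddatacheck.DelayedDataDetectorFactory;
import org.elasticsearch.xpack.ml.datafeed.delayeddatacheck.DelayedDataDetectorFactory.BucketWithMissingData;

public class DetectorUsageSketch {
    static long totalMissing(Job job, DatafeedConfig datafeedConfig, Client client, long latestFinalizedBucketMs) {
        // With the check disabled this is a NullDelayedDataDetector and the list is always empty.
        DelayedDataDetector detector = DelayedDataDetectorFactory.buildDetector(job, datafeedConfig, client);
        List<BucketWithMissingData> missing = detector.detectMissingData(latestFinalizedBucketMs);
        return missing.stream().mapToLong(BucketWithMissingData::getMissingDocumentCount).sum();
    }
}
```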
+ * + * @param job The {@link Job} object for the given `datafeedConfig` + * @param datafeedConfig The {@link DatafeedConfig} for which to create the {@link DelayedDataDetector} + * @param client The {@link Client} capable of taking action against the ES Cluster. + * @return A new {@link DelayedDataDetector} + */ + public static DelayedDataDetector buildDetector(Job job, DatafeedConfig datafeedConfig, Client client) { + if (datafeedConfig.getDelayedDataCheckConfig().isEnabled()) { + long window = validateAndCalculateWindowLength(job.getAnalysisConfig().getBucketSpan(), + datafeedConfig.getDelayedDataCheckConfig().getCheckWindow()); + long bucketSpan = job.getAnalysisConfig().getBucketSpan() == null ? 0 : job.getAnalysisConfig().getBucketSpan().millis(); + return new DatafeedDelayedDataDetector(bucketSpan, + window, + job.getId(), + job.getDataDescription().getTimeField(), + datafeedConfig.getQuery(), + datafeedConfig.getIndices().toArray(new String[0]), + client); + } else { + return new NullDelayedDataDetector(); + } + } + + private static long validateAndCalculateWindowLength(TimeValue bucketSpan, TimeValue currentWindow) { + if (bucketSpan == null) { + return 0; + } + if (currentWindow == null) { // we should provide a good default as the user did not specify a window + if(bucketSpan.compareTo(DEFAULT_CHECK_WINDOW) >= 0) { + return FALLBACK_NUMBER_OF_BUCKETS_TO_SPAN * bucketSpan.millis(); + } else { + return DEFAULT_CHECK_WINDOW.millis(); + } + } + if (currentWindow.compareTo(bucketSpan) < 0) { + throw new IllegalArgumentException( + Messages.getMessage(Messages.DATAFEED_CONFIG_DELAYED_DATA_CHECK_TOO_SMALL, currentWindow.getStringRep(), + bucketSpan.getStringRep())); + } else if (currentWindow.millis() > bucketSpan.millis() * DelayedDataCheckConfig.MAX_NUMBER_SPANABLE_BUCKETS) { + throw new IllegalArgumentException( + Messages.getMessage(Messages.DATAFEED_CONFIG_DELAYED_DATA_CHECK_SPANS_TOO_MANY_BUCKETS, currentWindow.getStringRep(), + bucketSpan.getStringRep())); + } + return currentWindow.millis(); + } + + public static class BucketWithMissingData { + + private final long missingDocumentCount; + private final Bucket bucket; + + public static BucketWithMissingData fromMissingAndBucket(long missingDocumentCount, Bucket bucket) { + return new BucketWithMissingData(missingDocumentCount, bucket); + } + + private BucketWithMissingData(long missingDocumentCount, Bucket bucket) { + this.missingDocumentCount = missingDocumentCount; + this.bucket = bucket; + } + + public long getTimeStamp() { + return bucket.getEpoch(); + } + + public Bucket getBucket() { + return bucket; + } + + public long getMissingDocumentCount() { + return missingDocumentCount; + } + + @Override + public boolean equals(Object other) { + if (other == this) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + BucketWithMissingData that = (BucketWithMissingData) other; + + return Objects.equals(that.bucket, bucket) && Objects.equals(that.missingDocumentCount, missingDocumentCount); + } + + @Override + public int hashCode() { + return Objects.hash(bucket, missingDocumentCount); + } + } + +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/NullDelayedDataDetector.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/NullDelayedDataDetector.java new file mode 100644 index 0000000000000..ee6178e6e847a --- /dev/null +++ 
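Restating validateAndCalculateWindowLength()'s defaulting and bounds as a stand-alone sketch (names here are hypothetical; maxSpannableBuckets stands in for DelayedDataCheckConfig.MAX_NUMBER_SPANABLE_BUCKETS):

```java
import java.util.concurrent.TimeUnit;

public class CheckWindowRules {
    private static final long DEFAULT_CHECK_WINDOW_MS = TimeUnit.HOURS.toMillis(2);
    private static final int FALLBACK_BUCKETS = 8; // eight 15-minute buckets fill the 2h default

    static long effectiveWindowMs(Long configuredWindowMs, long bucketSpanMs, long maxSpannableBuckets) {
        if (configuredWindowMs == null) { // user did not set check_window, pick a sane default
            return bucketSpanMs >= DEFAULT_CHECK_WINDOW_MS ? FALLBACK_BUCKETS * bucketSpanMs : DEFAULT_CHECK_WINDOW_MS;
        }
        if (configuredWindowMs < bucketSpanMs) {
            throw new IllegalArgumentException("check_window must be at least one bucket_span");
        }
        if (configuredWindowMs > bucketSpanMs * maxSpannableBuckets) {
            throw new IllegalArgumentException("check_window spans too many buckets");
        }
        return configuredWindowMs;
    }
}
```

This matches the factory tests later in this diff: a 3h bucket span with no configured window yields 8 × 3h = 24h, while a 2s bucket span with no window yields the 2h default.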
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/NullDelayedDataDetector.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.datafeed.delayeddatacheck; + +import java.util.Collections; +import java.util.List; + +/** + * This class will always return an {@link Collections#emptyList()}. + */ +public class NullDelayedDataDetector implements DelayedDataDetector { + + /** + * Always returns an empty collection + * @param unusedTimeStamp unused parameter + * @return {@link Collections#emptyList()} + */ + @Override + public List detectMissingData(long unusedTimeStamp) { + return Collections.emptyList(); + } + + /** + * Always returns 0 + * @return 0 + */ + @Override + public long getWindow() { + return 0L; + } + +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/FlushListener.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/FlushListener.java index 1340556fbcdb1..0028bfef92883 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/FlushListener.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/FlushListener.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.ml.job.process.autodetect.output; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.util.concurrent.RunOnce; import org.elasticsearch.xpack.core.ml.job.process.autodetect.output.FlushAcknowledgement; import java.time.Duration; @@ -14,16 +15,21 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; class FlushListener { final ConcurrentMap awaitingFlushed = new ConcurrentHashMap<>(); - final AtomicBoolean cleared = new AtomicBoolean(false); + final RunOnce onClear = new RunOnce(() -> { + Iterator> latches = awaitingFlushed.entrySet().iterator(); + while (latches.hasNext()) { + latches.next().getValue().latch.countDown(); + latches.remove(); + } + }); @Nullable FlushAcknowledgement waitForFlush(String flushId, Duration timeout) throws InterruptedException { - if (cleared.get()) { + if (onClear.hasRun()) { return null; } @@ -49,13 +55,7 @@ void clear(String flushId) { } void clear() { - if (cleared.compareAndSet(false, true)) { - Iterator> latches = awaitingFlushed.entrySet().iterator(); - while (latches.hasNext()) { - latches.next().getValue().latch.countDown(); - latches.remove(); - } - } + onClear.run(); } private static class FlushAcknowledgementHolder { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobTests.java index 268a351cd249e..930817b502176 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import 
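The FlushListener change above swaps a hand-rolled compareAndSet-guarded clear for the RunOnce utility: the closure body executes at most once, and hasRun() replaces the old AtomicBoolean flag. A minimal demonstration of those semantics (the demo class itself is hypothetical):

```java
import org.elasticsearch.common.util.concurrent.RunOnce;

public class RunOnceDemo {
    public static void main(String[] args) {
        RunOnce onClear = new RunOnce(() -> System.out.println("clearing waiters"));
        System.out.println(onClear.hasRun()); // false
        onClear.run();                        // runs the closure, prints once
        onClear.run();                        // no-op on subsequent calls
        System.out.println(onClear.hasRun()); // true
    }
}
```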
org.elasticsearch.common.xcontent.XContentElasticsearchExtension; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.mock.orig.Mockito; import org.elasticsearch.test.ESTestCase; @@ -18,6 +19,10 @@ import org.elasticsearch.xpack.core.ml.action.PersistJobAction; import org.elasticsearch.xpack.core.ml.action.PostDataAction; import org.elasticsearch.xpack.core.ml.datafeed.extractor.DataExtractor; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.job.results.Bucket; +import org.elasticsearch.xpack.ml.datafeed.delayeddatacheck.DelayedDataDetector; +import org.elasticsearch.xpack.ml.datafeed.delayeddatacheck.DelayedDataDetectorFactory.BucketWithMissingData; import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractorFactory; import org.elasticsearch.xpack.core.ml.job.config.DataDescription; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; @@ -30,6 +35,7 @@ import java.io.IOException; import java.io.InputStream; import java.nio.charset.StandardCharsets; +import java.util.Collections; import java.util.Date; import java.util.List; import java.util.Optional; @@ -56,10 +62,12 @@ public class DatafeedJobTests extends ESTestCase { private DataExtractorFactory dataExtractorFactory; private DataExtractor dataExtractor; private Client client; + private DelayedDataDetector delayedDataDetector; private DataDescription.Builder dataDescription; ActionFuture postDataFuture; private ActionFuture flushJobFuture; private ArgumentCaptor flushJobRequests; + private FlushJobAction.Response flushJobResponse; private long currentTime; private XContentType xContentType; @@ -79,6 +87,9 @@ public void setup() throws Exception { dataDescription.setFormat(DataDescription.DataFormat.XCONTENT); postDataFuture = mock(ActionFuture.class); flushJobFuture = mock(ActionFuture.class); + flushJobResponse = new FlushJobAction.Response(); + delayedDataDetector = mock(DelayedDataDetector.class); + when(delayedDataDetector.getWindow()).thenReturn(DatafeedJob.MISSING_DATA_CHECK_INTERVAL_MS); currentTime = 0; xContentType = XContentType.JSON; @@ -96,6 +107,7 @@ public void setup() throws Exception { when(postDataFuture.actionGet()).thenReturn(new PostDataAction.Response(dataCounts)); flushJobRequests = ArgumentCaptor.forClass(FlushJobAction.Request.class); + when(flushJobFuture.actionGet()).thenReturn(flushJobResponse); when(client.execute(same(FlushJobAction.INSTANCE), flushJobRequests.capture())).thenReturn(flushJobFuture); } @@ -193,6 +205,13 @@ public void testContinueFromNow() throws Exception { } public void testRealtimeRun() throws Exception { + flushJobResponse = new FlushJobAction.Response(true, new Date(2000)); + Bucket bucket = mock(Bucket.class); + when(bucket.getTimestamp()).thenReturn(new Date(2000)); + when(flushJobFuture.actionGet()).thenReturn(flushJobResponse); + when(client.execute(same(FlushJobAction.INSTANCE), flushJobRequests.capture())).thenReturn(flushJobFuture); + when(delayedDataDetector.detectMissingData(2000)) + .thenReturn(Collections.singletonList(BucketWithMissingData.fromMissingAndBucket(10, bucket))); currentTime = 60000L; long frequencyMs = 100; long queryDelayMs = 1000; @@ -206,6 +225,29 @@ public void testRealtimeRun() throws Exception { flushRequest.setAdvanceTime("59000"); verify(client).execute(same(FlushJobAction.INSTANCE), eq(flushRequest)); verify(client, never()).execute(same(PersistJobAction.INSTANCE), any()); + + // Execute a second valid time, but do so in a smaller window 
than the interval + currentTime = 62000L; + byte[] contentBytes = "content".getBytes(StandardCharsets.UTF_8); + InputStream inputStream = new ByteArrayInputStream(contentBytes); + when(dataExtractor.hasNext()).thenReturn(true).thenReturn(false); + when(dataExtractor.next()).thenReturn(Optional.of(inputStream)); + when(dataExtractorFactory.newExtractor(anyLong(), anyLong())).thenReturn(dataExtractor); + datafeedJob.runRealtime(); + + // Execute a third time, but this time make sure we exceed the data check interval, but keep the delayedDataDetector response + // the same + currentTime = 62000L + DatafeedJob.MISSING_DATA_CHECK_INTERVAL_MS + 1; + inputStream = new ByteArrayInputStream(contentBytes); + when(dataExtractor.hasNext()).thenReturn(true).thenReturn(false); + when(dataExtractor.next()).thenReturn(Optional.of(inputStream)); + when(dataExtractorFactory.newExtractor(anyLong(), anyLong())).thenReturn(dataExtractor); + datafeedJob.runRealtime(); + + verify(auditor, times(1)).warning(jobId, + Messages.getMessage(Messages.JOB_AUDIT_DATAFEED_MISSING_DATA, + 10, + XContentElasticsearchExtension.DEFAULT_DATE_PRINTER.print(2000))); } public void testEmptyDataCountGivenlookback() throws Exception { @@ -321,6 +363,6 @@ private DatafeedJob createDatafeedJob(long frequencyMs, long queryDelayMs, long long latestRecordTimeMs) { Supplier currentTimeSupplier = () -> currentTime; return new DatafeedJob(jobId, dataDescription.build(), frequencyMs, queryDelayMs, dataExtractorFactory, client, auditor, - currentTimeSupplier, latestFinalBucketEndTimeMs, latestRecordTimeMs); + currentTimeSupplier, delayedDataDetector, latestFinalBucketEndTimeMs, latestRecordTimeMs); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobValidatorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobValidatorTests.java index 35fd9bb98abf3..1507e106c61d3 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobValidatorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobValidatorTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedJobValidator; +import org.elasticsearch.xpack.core.ml.datafeed.DelayedDataCheckConfig; import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; import org.elasticsearch.xpack.core.ml.job.config.DataDescription; import org.elasticsearch.xpack.core.ml.job.config.Detector; @@ -176,6 +177,30 @@ public void testVerify_FrequencyIsMultipleOfHistogramInterval() throws IOExcepti assertEquals("Datafeed frequency [1.5m] must be a multiple of the aggregation interval [60000ms]", e.getMessage()); } + public void testVerify_BucketIntervalAndDataCheckWindowAreValid() { + Job.Builder builder = buildJobBuilder("foo"); + AnalysisConfig.Builder ac = createAnalysisConfig(); + ac.setSummaryCountFieldName("some_count"); + ac.setBucketSpan(TimeValue.timeValueSeconds(2)); + builder.setAnalysisConfig(ac); + Job job = builder.build(new Date()); + DatafeedConfig.Builder datafeedBuilder = createValidDatafeedConfig(); + datafeedBuilder.setDelayedDataCheckConfig(DelayedDataCheckConfig.enabledDelayedDataCheckConfig(TimeValue.timeValueMinutes(10))); + + DatafeedJobValidator.validate(datafeedBuilder.build(), job); + + 
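The three runRealtime() invocations in the test above exercise the audit de-duplication: the third run is past the check interval, but the detector returns the same buckets, so Objects.hash(totalRecordsMissing, lastBucketDate) is unchanged and verify(auditor, times(1)) still holds. A hypothetical distillation of that guard:

```java
import java.util.Date;
import java.util.Objects;

public class AuditDedupDemo {
    // Starts at 0, like the lastDataCheckAudit field on DatafeedJob;
    // the first real audit hash will (almost surely) differ from 0.
    private int lastDataCheckAudit;

    boolean shouldAudit(long totalRecordsMissing, Date lastBucketDate) {
        int newAudit = Objects.hash(totalRecordsMissing, lastBucketDate);
        if (newAudit != lastDataCheckAudit) {
            lastDataCheckAudit = newAudit;
            return true;
        }
        return false;
    }

    public static void main(String[] args) {
        AuditDedupDemo demo = new AuditDedupDemo();
        Date bucket = new Date(2000);
        System.out.println(demo.shouldAudit(10, bucket)); // true  -> warning emitted
        System.out.println(demo.shouldAudit(10, bucket)); // false -> duplicate suppressed
    }
}
```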
datafeedBuilder.setDelayedDataCheckConfig(DelayedDataCheckConfig.enabledDelayedDataCheckConfig(TimeValue.timeValueSeconds(1))); + ElasticsearchStatusException e = ESTestCase.expectThrows(ElasticsearchStatusException.class, + () -> DatafeedJobValidator.validate(datafeedBuilder.build(), job)); + assertEquals(Messages.getMessage(Messages.DATAFEED_CONFIG_DELAYED_DATA_CHECK_TOO_SMALL, "1s", "2s"), e.getMessage()); + + datafeedBuilder.setDelayedDataCheckConfig(DelayedDataCheckConfig.enabledDelayedDataCheckConfig(TimeValue.timeValueHours(24))); + e = ESTestCase.expectThrows(ElasticsearchStatusException.class, + () -> DatafeedJobValidator.validate(datafeedBuilder.build(), job)); + assertEquals(Messages.getMessage( + Messages.DATAFEED_CONFIG_DELAYED_DATA_CHECK_SPANS_TOO_MANY_BUCKETS, "1d", "2s"), e.getMessage()); + } + private static Job.Builder buildJobBuilder(String id) { Job.Builder builder = new Job.Builder(id); AnalysisConfig.Builder ac = createAnalysisConfig(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DelayedDataDetectorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DelayedDataDetectorTests.java deleted file mode 100644 index 9a54181af9ce6..0000000000000 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DelayedDataDetectorTests.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.ml.datafeed; - -import org.elasticsearch.client.Client; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; -import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; -import org.elasticsearch.xpack.core.ml.job.config.DataDescription; -import org.elasticsearch.xpack.core.ml.job.config.Detector; -import org.elasticsearch.xpack.core.ml.job.config.Job; - -import java.util.Collections; -import java.util.Date; - -import static org.hamcrest.Matchers.equalTo; -import static org.mockito.Mockito.mock; - - -public class DelayedDataDetectorTests extends ESTestCase { - - - public void testConstructorWithValueValues() { - TimeValue window = TimeValue.timeValueSeconds(10); - Job job = createJob(TimeValue.timeValueSeconds(1)); - DelayedDataDetector delayedDataDetector = new DelayedDataDetector(job, createDatafeed(), window, mock(Client.class)); - assertNotNull(delayedDataDetector); - } - - public void testConstructorWithInvalidValues() { - TimeValue shortWindow = TimeValue.timeValueMillis(500); - Job job = createJob(TimeValue.timeValueSeconds(1)); - - Exception exception = expectThrows(IllegalArgumentException.class, - ()-> new DelayedDataDetector(job, createDatafeed(), shortWindow, mock(Client.class))); - assertThat(exception.getMessage(), equalTo("[window] must be greater or equal to the [bucket_span]")); - - TimeValue longWindow = TimeValue.timeValueSeconds(20000); - - exception = expectThrows(IllegalArgumentException.class, - ()-> new DelayedDataDetector(job, createDatafeed(), longWindow, mock(Client.class))); - assertThat(exception.getMessage(), equalTo("[window] must contain less than 10000 buckets at the current [bucket_span]")); - } - - - private Job createJob(TimeValue bucketSpan) { - DataDescription.Builder dataDescription = new DataDescription.Builder(); - 
dataDescription.setFormat(DataDescription.DataFormat.XCONTENT); - dataDescription.setTimeField("time"); - dataDescription.setTimeFormat(DataDescription.EPOCH_MS); - - Detector.Builder d = new Detector.Builder("count", null); - AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(Collections.singletonList(d.build())); - analysisConfig.setBucketSpan(bucketSpan); - - Job.Builder builder = new Job.Builder(); - builder.setId("test-job"); - builder.setAnalysisConfig(analysisConfig); - builder.setDataDescription(dataDescription); - return builder.build(new Date()); - } - - private DatafeedConfig createDatafeed() { - DatafeedConfig.Builder builder = new DatafeedConfig.Builder("id", "jobId"); - builder.setIndices(Collections.singletonList("index1")); - builder.setTypes(Collections.singletonList("doc")); - return builder.build(); - } - - - -} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DelayedDataDetectorFactoryTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DelayedDataDetectorFactoryTests.java new file mode 100644 index 0000000000000..12cf97734c90d --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DelayedDataDetectorFactoryTests.java @@ -0,0 +1,103 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.datafeed.delayeddatacheck; + +import org.elasticsearch.client.Client; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.datafeed.DelayedDataCheckConfig; +import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; +import org.elasticsearch.xpack.core.ml.job.config.DataDescription; +import org.elasticsearch.xpack.core.ml.job.config.Detector; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; + +import java.util.Collections; +import java.util.Date; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Mockito.mock; + + +public class DelayedDataDetectorFactoryTests extends ESTestCase { + + public void testBuilder() { + Job job = createJob(TimeValue.timeValueSeconds(2)); + + DatafeedConfig datafeedConfig = createDatafeed(false, null); + + // Should not throw + assertThat(DelayedDataDetectorFactory.buildDetector(job, datafeedConfig, mock(Client.class)), + instanceOf(NullDelayedDataDetector.class)); + + datafeedConfig = createDatafeed(true, TimeValue.timeValueMinutes(10)); + + // Should not throw + assertThat(DelayedDataDetectorFactory.buildDetector(job, datafeedConfig, mock(Client.class)), + instanceOf(DatafeedDelayedDataDetector.class)); + + DatafeedConfig tooSmallDatafeedConfig = createDatafeed(true, TimeValue.timeValueSeconds(1)); + IllegalArgumentException e = ESTestCase.expectThrows(IllegalArgumentException.class, + () -> DelayedDataDetectorFactory.buildDetector(job, tooSmallDatafeedConfig, mock(Client.class))); + assertEquals(Messages.getMessage(Messages.DATAFEED_CONFIG_DELAYED_DATA_CHECK_TOO_SMALL, "1s", "2s"), e.getMessage()); + + DatafeedConfig tooBigDatafeedConfig = createDatafeed(true, TimeValue.timeValueHours(12)); 
+ e = ESTestCase.expectThrows(IllegalArgumentException.class, + () -> DelayedDataDetectorFactory.buildDetector(job, tooBigDatafeedConfig, mock(Client.class))); + assertEquals(Messages.getMessage( + Messages.DATAFEED_CONFIG_DELAYED_DATA_CHECK_SPANS_TOO_MANY_BUCKETS, "12h", "2s"), e.getMessage()); + + Job withBigBucketSpan = createJob(TimeValue.timeValueHours(3)); + datafeedConfig = createDatafeed(true, null); + + // Should not throw + DelayedDataDetector delayedDataDetector = + DelayedDataDetectorFactory.buildDetector(withBigBucketSpan, datafeedConfig, mock(Client.class)); + assertThat(delayedDataDetector.getWindow(), equalTo(TimeValue.timeValueHours(3).millis() * 8)); + + datafeedConfig = createDatafeed(true, null); + + // Should not throw + delayedDataDetector = + DelayedDataDetectorFactory.buildDetector(job, datafeedConfig, mock(Client.class)); + assertThat(delayedDataDetector.getWindow(), equalTo(TimeValue.timeValueHours(2).millis())); + + } + + private Job createJob(TimeValue bucketSpan) { + DataDescription.Builder dataDescription = new DataDescription.Builder(); + dataDescription.setFormat(DataDescription.DataFormat.XCONTENT); + dataDescription.setTimeField("time"); + dataDescription.setTimeFormat(DataDescription.EPOCH_MS); + + Detector.Builder d = new Detector.Builder("count", null); + AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(Collections.singletonList(d.build())); + analysisConfig.setBucketSpan(bucketSpan); + + Job.Builder builder = new Job.Builder(); + builder.setId("test-job"); + builder.setAnalysisConfig(analysisConfig); + builder.setDataDescription(dataDescription); + return builder.build(new Date()); + } + + private DatafeedConfig createDatafeed(boolean shouldDetectDelayedData, TimeValue delayedDatacheckWindow) { + DatafeedConfig.Builder builder = new DatafeedConfig.Builder("id", "jobId"); + builder.setIndices(Collections.singletonList("index1")); + builder.setTypes(Collections.singletonList("doc")); + + if (shouldDetectDelayedData) { + builder.setDelayedDataCheckConfig(DelayedDataCheckConfig.enabledDelayedDataCheckConfig(delayedDatacheckWindow)); + } else { + builder.setDelayedDataCheckConfig(DelayedDataCheckConfig.disabledDelayedDataCheckConfig()); + } + return builder.build(); + } + + +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/FlushListenerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/FlushListenerTests.java index 3bcedb523923e..3343882d581b8 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/FlushListenerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/FlushListenerTests.java @@ -60,7 +60,7 @@ public void testClear() throws Exception { } assertBusy(() -> assertEquals(numWaits, listener.awaitingFlushed.size())); assertThat(flushAcknowledgementHolders.stream().map(f -> f.get()).filter(f -> f != null).findAny().isPresent(), is(false)); - assertFalse(listener.cleared.get()); + assertFalse(listener.onClear.hasRun()); listener.clear(); @@ -68,6 +68,6 @@ public void testClear() throws Exception { assertBusy(() -> assertNotNull(f.get())); } assertTrue(listener.awaitingFlushed.isEmpty()); - assertTrue(listener.cleared.get()); + assertTrue(listener.onClear.hasRun()); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java index 549083896108e..a43ad8c031b59 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java @@ -196,7 +196,7 @@ licenseState, threadContext, mock(ReservedRealm.class), Arrays.asList(firstRealm ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); tokenService = new TokenService(settings, Clock.systemUTC(), client, securityIndex, clusterService); service = new AuthenticationService(settings, realms, auditTrail, - new DefaultAuthenticationFailureHandler(), threadPool, new AnonymousUser(settings), tokenService); + new DefaultAuthenticationFailureHandler(Collections.emptyMap()), threadPool, new AnonymousUser(settings), tokenService); } @After @@ -461,8 +461,8 @@ public void testAutheticateTransportContextAndHeader() throws Exception { try { ThreadContext threadContext1 = threadPool1.getThreadContext(); service = new AuthenticationService(Settings.EMPTY, realms, auditTrail, - new DefaultAuthenticationFailureHandler(), threadPool1, new AnonymousUser(Settings.EMPTY), tokenService); - + new DefaultAuthenticationFailureHandler(Collections.emptyMap()), threadPool1, new AnonymousUser(Settings.EMPTY), + tokenService); threadContext1.putTransient(AuthenticationField.AUTHENTICATION_KEY, authRef.get()); threadContext1.putHeader(AuthenticationField.AUTHENTICATION_KEY, authHeaderRef.get()); @@ -485,7 +485,8 @@ public void testAutheticateTransportContextAndHeader() throws Exception { final String header; try (ThreadContext.StoredContext ignore = threadContext2.stashContext()) { service = new AuthenticationService(Settings.EMPTY, realms, auditTrail, - new DefaultAuthenticationFailureHandler(), threadPool2, new AnonymousUser(Settings.EMPTY), tokenService); + new DefaultAuthenticationFailureHandler(Collections.emptyMap()), threadPool2, new AnonymousUser(Settings.EMPTY), + tokenService); threadContext2.putHeader(AuthenticationField.AUTHENTICATION_KEY, authHeaderRef.get()); BytesStreamOutput output = new BytesStreamOutput(); @@ -498,7 +499,8 @@ public void testAutheticateTransportContextAndHeader() throws Exception { threadPool2.getThreadContext().putHeader(AuthenticationField.AUTHENTICATION_KEY, header); service = new AuthenticationService(Settings.EMPTY, realms, auditTrail, - new DefaultAuthenticationFailureHandler(), threadPool2, new AnonymousUser(Settings.EMPTY), tokenService); + new DefaultAuthenticationFailureHandler(Collections.emptyMap()), threadPool2, new AnonymousUser(Settings.EMPTY), + tokenService); service.authenticate("_action", new InternalMessage(), SystemUser.INSTANCE, ActionListener.wrap(result -> { assertThat(result, notNullValue()); assertThat(result.getUser(), equalTo(user1)); @@ -533,8 +535,8 @@ public void testAnonymousUserRest() throws Exception { } Settings settings = builder.build(); final AnonymousUser anonymousUser = new AnonymousUser(settings); - service = new AuthenticationService(settings, realms, auditTrail, new DefaultAuthenticationFailureHandler(), - threadPool, anonymousUser, tokenService); + service = new AuthenticationService(settings, realms, auditTrail, new DefaultAuthenticationFailureHandler(Collections.emptyMap()), + threadPool, anonymousUser, tokenService); RestRequest request = new FakeRestRequest(); Authentication result = authenticateBlocking(request); 
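The repeated Collections.emptyMap() argument in these security tests suggests DefaultAuthenticationFailureHandler now takes a map of default response headers to attach to authentication failures; this diff does not show the new constructor's declaration, so the exact parameter type is an assumption, and the empty map simply preserves the previous behavior here. A hedged sketch of a non-empty configuration under that assumption:

```java
import java.util.Collections;
import java.util.List;
import java.util.Map;

import org.elasticsearch.xpack.core.security.authc.DefaultAuthenticationFailureHandler;

public class FailureHandlerSketch {
    static DefaultAuthenticationFailureHandler withBasicChallenge() {
        // Assumed shape: header name -> header values returned on auth failure.
        Map<String, List<String>> failureResponseHeaders = Collections.singletonMap(
                "WWW-Authenticate", Collections.singletonList("Basic realm=\"security\" charset=\"UTF-8\""));
        return new DefaultAuthenticationFailureHandler(failureResponseHeaders);
    }
}
```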
@@ -551,8 +553,8 @@ public void testAnonymousUserTransportNoDefaultUser() throws Exception { .putList(AnonymousUser.ROLES_SETTING.getKey(), "r1", "r2", "r3") .build(); final AnonymousUser anonymousUser = new AnonymousUser(settings); - service = new AuthenticationService(settings, realms, auditTrail, - new DefaultAuthenticationFailureHandler(), threadPool, anonymousUser, tokenService); + service = new AuthenticationService(settings, realms, auditTrail, new DefaultAuthenticationFailureHandler(Collections.emptyMap()), + threadPool, anonymousUser, tokenService); InternalMessage message = new InternalMessage(); Authentication result = authenticateBlocking("_action", message, null); @@ -566,8 +568,8 @@ public void testAnonymousUserTransportWithDefaultUser() throws Exception { .putList(AnonymousUser.ROLES_SETTING.getKey(), "r1", "r2", "r3") .build(); final AnonymousUser anonymousUser = new AnonymousUser(settings); - service = new AuthenticationService(settings, realms, auditTrail, - new DefaultAuthenticationFailureHandler(), threadPool, anonymousUser, tokenService); + service = new AuthenticationService(settings, realms, auditTrail, new DefaultAuthenticationFailureHandler(Collections.emptyMap()), + threadPool, anonymousUser, tokenService); InternalMessage message = new InternalMessage(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java index 17562b13f0d74..e5f92ab979ba7 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java @@ -226,7 +226,7 @@ public void setup() { return Void.TYPE; }).when(rolesStore).roles(any(Set.class), any(FieldPermissionsCache.class), any(ActionListener.class)); authorizationService = new AuthorizationService(settings, rolesStore, clusterService, - auditTrail, new DefaultAuthenticationFailureHandler(), threadPool, new AnonymousUser(settings)); + auditTrail, new DefaultAuthenticationFailureHandler(Collections.emptyMap()), threadPool, new AnonymousUser(settings)); } private void authorize(Authentication authentication, String action, TransportRequest request) { @@ -595,7 +595,7 @@ public void testDenialForAnonymousUser() { Settings settings = Settings.builder().put(AnonymousUser.ROLES_SETTING.getKey(), "a_all").build(); final AnonymousUser anonymousUser = new AnonymousUser(settings); authorizationService = new AuthorizationService(settings, rolesStore, clusterService, auditTrail, - new DefaultAuthenticationFailureHandler(), threadPool, anonymousUser); + new DefaultAuthenticationFailureHandler(Collections.emptyMap()), threadPool, anonymousUser); RoleDescriptor role = new RoleDescriptor("a_all", null, new IndicesPrivileges[] { IndicesPrivileges.builder().indices("a").privileges("all").build() }, null); @@ -620,7 +620,7 @@ public void testDenialForAnonymousUserAuthorizationExceptionDisabled() { .build(); final Authentication authentication = createAuthentication(new AnonymousUser(settings)); authorizationService = new AuthorizationService(settings, rolesStore, clusterService, auditTrail, - new DefaultAuthenticationFailureHandler(), threadPool, new AnonymousUser(settings)); + new DefaultAuthenticationFailureHandler(Collections.emptyMap()), threadPool, new AnonymousUser(settings)); RoleDescriptor role = new 
RoleDescriptor("a_all", null, new IndicesPrivileges[]{IndicesPrivileges.builder().indices("a").privileges("all").build()}, null); @@ -919,7 +919,7 @@ public void testAnonymousRolesAreAppliedToOtherUsers() { Settings settings = Settings.builder().put(AnonymousUser.ROLES_SETTING.getKey(), "anonymous_user_role").build(); final AnonymousUser anonymousUser = new AnonymousUser(settings); authorizationService = new AuthorizationService(settings, rolesStore, clusterService, auditTrail, - new DefaultAuthenticationFailureHandler(), threadPool, anonymousUser); + new DefaultAuthenticationFailureHandler(Collections.emptyMap()), threadPool, anonymousUser); roleMap.put("anonymous_user_role", new RoleDescriptor("anonymous_user_role", new String[]{"all"}, new IndicesPrivileges[]{IndicesPrivileges.builder().indices("a").privileges("all").build()}, null)); mockEmptyMetaData(); @@ -945,7 +945,7 @@ public void testAnonymousUserEnabledRoleAdded() { Settings settings = Settings.builder().put(AnonymousUser.ROLES_SETTING.getKey(), "anonymous_user_role").build(); final AnonymousUser anonymousUser = new AnonymousUser(settings); authorizationService = new AuthorizationService(settings, rolesStore, clusterService, auditTrail, - new DefaultAuthenticationFailureHandler(), threadPool, anonymousUser); + new DefaultAuthenticationFailureHandler(Collections.emptyMap()), threadPool, anonymousUser); roleMap.put("anonymous_user_role", new RoleDescriptor("anonymous_user_role", new String[]{"all"}, new IndicesPrivileges[]{IndicesPrivileges.builder().indices("a").privileges("all").build()}, null)); mockEmptyMetaData(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java index 31b9551b903dd..83edb189e2935 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java @@ -74,6 +74,7 @@ import org.junit.Before; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -189,7 +190,7 @@ public void setup() { ClusterService clusterService = mock(ClusterService.class); when(clusterService.getClusterSettings()).thenReturn(new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); authzService = new AuthorizationService(settings, rolesStore, clusterService, - mock(AuditTrailService.class), new DefaultAuthenticationFailureHandler(), mock(ThreadPool.class), + mock(AuditTrailService.class), new DefaultAuthenticationFailureHandler(Collections.emptyMap()), mock(ThreadPool.class), new AnonymousUser(settings)); defaultIndicesResolver = new IndicesAndAliasesResolver(settings, clusterService); } diff --git a/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcCsvSpecIT.java b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcCsvSpecIT.java index 4f841e02ae3d5..66ac2e2c7df24 100644 --- a/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcCsvSpecIT.java +++ b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcCsvSpecIT.java @@ -12,4 +12,11 @@ public class JdbcCsvSpecIT extends CsvSpecTestCase { public JdbcCsvSpecIT(String fileName, 
String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { super(fileName, groupName, testName, lineNumber, testCase); } + + @Override + protected int fetchSize() { + // use a smaller fetchSize for nested document tests to uncover bugs + // similar to https://github.com/elastic/elasticsearch/issues/35176 more quickly + return fileName.startsWith("nested") && randomBoolean() ? randomIntBetween(1, 5) : super.fetchSize(); + } } diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/FetchSizeTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/FetchSizeTestCase.java index e0a5bd26db0a2..fee5901bc4cb2 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/FetchSizeTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/FetchSizeTestCase.java @@ -6,6 +6,9 @@ package org.elasticsearch.xpack.sql.qa.jdbc; import org.elasticsearch.client.Request; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.junit.Before; import java.io.IOException; @@ -23,12 +26,42 @@ public class FetchSizeTestCase extends JdbcIntegrationTestCase { @Before public void createTestIndex() throws IOException { - Request request = new Request("PUT", "/test/doc/_bulk"); + Request request = new Request("PUT", "/test"); + XContentBuilder createIndex = JsonXContent.contentBuilder().startObject(); + createIndex.startObject("mappings"); + { + createIndex.startObject("doc"); + { + createIndex.startObject("properties"); + { + createIndex.startObject("nested").field("type", "nested"); + createIndex.startObject("properties"); + createIndex.startObject("inner_field").field("type", "integer").endObject(); + createIndex.endObject(); + createIndex.endObject(); + } + createIndex.endObject(); + } + createIndex.endObject(); + } + createIndex.endObject().endObject(); + request.setJsonEntity(Strings.toString(createIndex)); + client().performRequest(request); + + request = new Request("PUT", "/test/doc/_bulk"); request.addParameter("refresh", "true"); StringBuilder bulk = new StringBuilder(); + StringBuilder bulkLine; for (int i = 0; i < 20; i++) { bulk.append("{\"index\":{}}\n"); - bulk.append("{\"test_field\":" + i + "}\n"); + bulkLine = new StringBuilder("{\"test_field\":" + i); + bulkLine.append(", \"nested\":["); + // each document will have a nested field with 1 to 5 values + for (int j = 0; j <= i % 5; j++) { + bulkLine.append("{\"inner_field\":" + j + "}" + ((j == i % 5) ? "" : ",")); + } + bulkLine.append("]"); + bulk.append(bulkLine).append("}\n"); } request.setJsonEntity(bulk.toString()); client().performRequest(request); @@ -92,4 +125,32 @@ public void testAggregation() throws SQLException { } } } + + /** + * Test for nested documents.
+ */ + public void testNestedDocuments() throws Exception { + try (Connection c = esJdbc(); + Statement s = c.createStatement()) { + s.setFetchSize(5); + try (ResultSet rs = s.executeQuery("SELECT test_field, nested.* FROM test ORDER BY test_field ASC")) { + assertTrue("Empty result set!", rs.next()); + for (int i = 0; i < 20; i++) { + // each page of 5 documents flattens into 15 rows (1 + 2 + 3 + 4 + 5 nested values) + assertEquals(15, rs.getFetchSize()); + assertNestedDocuments(rs, i); + } + assertFalse(rs.next()); + } + } + assertNoSearchContexts(); + } + + private void assertNestedDocuments(ResultSet rs, int i) throws SQLException { + for (int j = 0; j <= i % 5; j++) { + assertEquals(i, rs.getInt(1)); + assertEquals(j, rs.getInt(2)); + // don't check the very last row in the result set + assertTrue("No more entries left after row " + rs.getRow(), (i + j == 23 || rs.next())); + } + } } diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SpecBaseIntegrationTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SpecBaseIntegrationTestCase.java index a7d0332508f6d..682fb824b70c2 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SpecBaseIntegrationTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SpecBaseIntegrationTestCase.java @@ -103,10 +103,14 @@ public final void test() throws Throwable { protected ResultSet executeJdbcQuery(Connection con, String query) throws SQLException { Statement statement = con.createStatement(); - statement.setFetchSize(between(1, 500)); + statement.setFetchSize(fetchSize()); return statement.executeQuery(query); } + + protected int fetchSize() { + return between(1, 500); + } + // TODO: use UTC for now until deciding on a strategy for handling date extraction @Override protected Properties connectionProperties() { diff --git a/x-pack/plugin/sql/qa/src/main/resources/dep_emp.csv b/x-pack/plugin/sql/qa/src/main/resources/dep_emp.csv index ece933b394130..5b669eaf451cc 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/dep_emp.csv +++ b/x-pack/plugin/sql/qa/src/main/resources/dep_emp.csv @@ -107,5 +107,7 @@ emp_no,dep_id,from_date,to_date 10097,d008,1990-09-15,9999-01-01 10098,d004,1985-05-13,1989-06-29 10098,d009,1989-06-29,1992-12-11 +10098,d008,1992-12-11,1993-05-05 +10098,d007,1993-05-05,1994-02-01 10099,d007,1988-10-18,9999-01-01 10100,d003,1987-09-21,9999-01-01 diff --git a/x-pack/plugin/sql/qa/src/main/resources/nested.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/nested.csv-spec index 89808901e9cff..3be0547fd43d8 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/nested.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/nested.csv-spec @@ -102,7 +102,7 @@ Mayuko | 12 selectWithScalarOnNested SELECT first_name f, last_name l, YEAR(dep.from_date) start FROM test_emp WHERE dep.dep_name = 'Production' AND languages > 1 ORDER BY start LIMIT 5; -f:s | l:s | start:i +f:s | l:s | start:i Sreekrishna |Servieres |1985 Zhongwei |Rosen |1986 @@ -137,7 +137,7 @@ d003 |Azuma d002 |Baek d003 |Baek d004 |Bamford -; +; selectNestedFieldLast SELECT first_name, dep.dep_id FROM test_emp ORDER BY first_name LIMIT 5; @@ -222,3 +222,78 @@ Anneke |d005 |Development |1990-08-05T00:00:00.000Z|9999-01 Anoosh |d005 |Development |1991-08-30T00:00:00.000Z|9999-01-01T00:00:00.000Z|Peyn Arumugam |d008 |Research |1987-04-18T00:00:00.000Z|1997-11-08T00:00:00.000Z|Ossenbruggen ; + +// +// Nested document tests targeted more at the JdbcCsvNestedDocsIT class (with a specific fetch_size value) +// + +// employee 10098 has 4 departments
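(Editorial aside, not part of the patch: the spec tests below pin down how LIMIT interacts with nested fields; every inner hit becomes its own result row, so a LIMIT applies to the flattened rows rather than to top-level documents. A minimal JDBC sketch of the same behaviour, assuming the es-sql driver URL; the suites above obtain connections via esJdbc() instead:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class NestedLimitSketch {
        public static void main(String[] args) throws Exception {
            // URL is an assumption for this sketch
            try (Connection con = DriverManager.getConnection("jdbc:es://localhost:9200");
                 Statement st = con.createStatement()) {
                st.setFetchSize(2); // scroll pages are counted in top-level documents
                try (ResultSet rs = st.executeQuery(
                        "SELECT dep.dep_id, first_name, emp_no FROM test_emp WHERE emp_no = 10098 LIMIT 2")) {
                    // expected: two rows for the same employee, one per department
                    while (rs.next()) {
                        System.out.println(rs.getString(1) + " | " + rs.getString(2) + " | " + rs.getInt(3));
                    }
                }
            }
        }
    }

End of aside.)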
+ +selectNestedFieldWithFourInnerHitsAndLimitOne +SELECT dep.dep_id, dep.dep_name, first_name, emp_no FROM test_emp WHERE emp_no=10098 LIMIT 1; + + dep.dep_id:s | dep.dep_name:s | first_name:s | emp_no:i +---------------+----------------+---------------+--------------- +d004 |Production |Sreekrishna |10098 +; + +selectNestedFieldWithFourInnerHitsAndLimitTwo +SELECT dep.dep_id, dep.dep_name, first_name, emp_no FROM test_emp WHERE emp_no=10098 LIMIT 2; + + dep.dep_id:s | dep.dep_name:s | first_name:s | emp_no:i +---------------+----------------+---------------+--------------- +d004 |Production |Sreekrishna |10098 +d009 |Customer Service|Sreekrishna |10098 +; + +selectNestedFieldWithFourInnerHitsAndLimitThree +SELECT dep.dep_id, dep.dep_name, first_name, emp_no FROM test_emp WHERE emp_no=10098 LIMIT 3; + + dep.dep_id:s | dep.dep_name:s | first_name:s | emp_no:i +---------------+----------------+---------------+--------------- +d004 |Production |Sreekrishna |10098 +d009 |Customer Service|Sreekrishna |10098 +d008 |Research |Sreekrishna |10098 +; + +selectNestedFieldWithFourInnerHitsAndLimitFour +SELECT dep.dep_id, dep.dep_name, first_name, emp_no FROM test_emp WHERE emp_no=10098 LIMIT 4; + + dep.dep_id:s | dep.dep_name:s | first_name:s | emp_no:i +---------------+----------------+---------------+--------------- +d004 |Production |Sreekrishna |10098 +d009 |Customer Service|Sreekrishna |10098 +d008 |Research |Sreekrishna |10098 +d007 |Sales |Sreekrishna |10098 +; + +selectNestedFieldWithFourInnerHitsAndLimitFive +SELECT dep.dep_id, dep.dep_name, first_name, emp_no FROM test_emp WHERE emp_no=10098 LIMIT 5; + + dep.dep_id:s | dep.dep_name:s | first_name:s | emp_no:i +---------------+----------------+---------------+--------------- +d004 |Production |Sreekrishna |10098 +d009 |Customer Service|Sreekrishna |10098 +d008 |Research |Sreekrishna |10098 +d007 |Sales |Sreekrishna |10098 +; + +selectNestedFieldFromTwoDocumentsWithFourInnerHitsAndLimitFive +SELECT dep.dep_id, dep.dep_name, first_name, emp_no FROM test_emp WHERE emp_no=10098 OR emp_no=10099 LIMIT 5; + + dep.dep_id:s | dep.dep_name:s | first_name:s | emp_no:i +---------------+----------------+---------------+--------------- +d004 |Production |Sreekrishna |10098 +d009 |Customer Service|Sreekrishna |10098 +d008 |Research |Sreekrishna |10098 +d007 |Sales |Sreekrishna |10098 +d007 |Sales |Valter |10099 +; + +selectNestedFieldFromDocumentWithOneInnerHitAndLimitOne +SELECT dep.dep_id, dep.dep_name, first_name, emp_no FROM test_emp WHERE emp_no=10099 LIMIT 1; + + dep.dep_id:s | dep.dep_name:s | first_name:s | emp_no:i +---------------+----------------+---------------+--------------- +d007 |Sales |Valter |10099 +; \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/CliFormatter.java b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/CliFormatter.java index c773e75aa18be..7daf768ee9cfb 100644 --- a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/CliFormatter.java +++ b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/CliFormatter.java @@ -9,8 +9,10 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.xpack.sql.proto.ColumnInfo; +import org.elasticsearch.xpack.sql.proto.DateUtils; import java.io.IOException; +import java.time.ZonedDateTime; import java.util.Arrays; import java.util.List; import java.util.Objects; @@ 
-24,7 +26,7 @@ public class CliFormatter implements Writeable { * The minimum width for any column in the formatted results. */ private static final int MIN_COLUMN_WIDTH = 15; - + private int[] width; /** @@ -45,7 +47,7 @@ public CliFormatter(List columns, List> rows) { for (int i = 0; i < width.length; i++) { // TODO are we sure toString is correct here? What about dates that come back as longs. // Tracked by https://github.com/elastic/x-pack-elasticsearch/issues/3081 - width[i] = Math.max(width[i], Objects.toString(row.get(i)).length()); + width[i] = Math.max(width[i], toString(row.get(i)).length()); } } } @@ -116,10 +118,10 @@ private String formatWithoutHeader(StringBuilder sb, List> rows) { if (i > 0) { sb.append('|'); } - // TODO are we sure toString is correct here? What about dates that come back as longs. // Tracked by https://github.com/elastic/x-pack-elasticsearch/issues/3081 - String string = Objects.toString(row.get(i)); + String string = toString(row.get(i)); + if (string.length() <= width[i]) { // Pad sb.append(string); @@ -138,6 +140,14 @@ private String formatWithoutHeader(StringBuilder sb, List> rows) { return sb.toString(); } + private static String toString(Object object) { + if (object instanceof ZonedDateTime) { + return DateUtils.toString((ZonedDateTime) object); + } else { + return Objects.toString(object); + } + } + /** * Pick a good estimate of the buffer size needed to contain the rows. */ @@ -154,8 +164,12 @@ int estimateSize(int rows) { @Override public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } CliFormatter that = (CliFormatter) o; return Arrays.equals(width, that.width); } diff --git a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryResponse.java b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryResponse.java index da4037ac95c64..ff7cb02781a56 100644 --- a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryResponse.java +++ b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryResponse.java @@ -13,11 +13,12 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.sql.proto.ColumnInfo; +import org.elasticsearch.xpack.sql.proto.DateUtils; import org.elasticsearch.xpack.sql.proto.Mode; -import org.joda.time.ReadableDateTime; import java.io.IOException; import java.sql.JDBCType; +import java.time.ZonedDateTime; import java.util.ArrayList; import java.util.List; import java.util.Objects; @@ -167,9 +168,17 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws * Serializes the provided value in SQL-compatible way based on the client mode */ public static XContentBuilder value(XContentBuilder builder, Mode mode, Object value) throws IOException { - if (Mode.isDriver(mode) && value instanceof ReadableDateTime) { - // JDBC cannot parse dates in string format - builder.value(((ReadableDateTime) value).getMillis()); + if (value instanceof ZonedDateTime) { + ZonedDateTime zdt = (ZonedDateTime) value; + if (Mode.isDriver(mode)) { + // JDBC cannot parse dates in string format and ODBC can have issues with it + // so instead, use the millis since epoch (in UTC) + builder.value(zdt.toInstant().toEpochMilli()); + } + // 
otherwise use the ISO format + else { + builder.value(DateUtils.toString(zdt)); + } } else { builder.value(value); } diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/DateUtils.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/DateUtils.java new file mode 100644 index 0000000000000..c087affe4ccc5 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/DateUtils.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.proto; + +import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; +import java.time.format.DateTimeFormatterBuilder; +import java.util.Locale; + +import static java.time.format.DateTimeFormatter.ISO_LOCAL_DATE; +import static java.time.temporal.ChronoField.HOUR_OF_DAY; +import static java.time.temporal.ChronoField.MILLI_OF_SECOND; +import static java.time.temporal.ChronoField.MINUTE_OF_HOUR; +import static java.time.temporal.ChronoField.SECOND_OF_MINUTE; + +public class DateUtils { + + private static final DateTimeFormatter ISO_WITH_MILLIS = new DateTimeFormatterBuilder() + .parseCaseInsensitive() + .append(ISO_LOCAL_DATE) + .appendLiteral('T') + .appendValue(HOUR_OF_DAY, 2) + .appendLiteral(':') + .appendValue(MINUTE_OF_HOUR, 2) + .appendLiteral(':') + .appendValue(SECOND_OF_MINUTE, 2) + .appendFraction(MILLI_OF_SECOND, 3, 3, true) + .appendOffsetId() + .toFormatter(Locale.ROOT); + + private DateUtils() {} + + public static String toString(ZonedDateTime dateTime) { + return dateTime.format(ISO_WITH_MILLIS); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java index f3a397dc68b3d..df6859cc6351b 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java @@ -318,19 +318,20 @@ protected void handleResponse(SearchResponse response, ActionListener 0) { String scrollId = response.getScrollId(); - + SchemaSearchHitRowSet hitRowSet = new SchemaSearchHitRowSet(schema, exts, hits, query.limit(), scrollId); + // if there's an id, try to set up the next scroll if (scrollId != null && // is all the content already retrieved?
- (Boolean.TRUE.equals(response.isTerminatedEarly()) || response.getHits().getTotalHits() == hits.length - // or maybe the limit has been reached - || (hits.length >= query.limit() && query.limit() > -1))) { + (Boolean.TRUE.equals(response.isTerminatedEarly()) + || response.getHits().getTotalHits() == hits.length + || hitRowSet.isLimitReached())) { // if so, clear the scroll clear(response.getScrollId(), ActionListener.wrap( succeeded -> listener.onResponse(new SchemaSearchHitRowSet(schema, exts, hits, query.limit(), null)), listener::onFailure)); } else { - listener.onResponse(new SchemaSearchHitRowSet(schema, exts, hits, query.limit(), scrollId)); + listener.onResponse(hitRowSet); } } // no hits diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SearchHitRowSet.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SearchHitRowSet.java index e8994bf108f8e..ba3682df5cc23 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SearchHitRowSet.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SearchHitRowSet.java @@ -23,7 +23,6 @@ class SearchHitRowSet extends AbstractRowSet { private final SearchHit[] hits; private final Cursor cursor; - private final String scrollId; private final List extractors; private final Set innerHits = new LinkedHashSet<>(); private final String innerHit; @@ -35,7 +34,6 @@ class SearchHitRowSet extends AbstractRowSet { SearchHitRowSet(List exts, SearchHit[] hits, int limit, String scrollId) { this.hits = hits; - this.scrollId = scrollId; this.extractors = exts; // Since the results might contain nested docs, the iteration is similar to that of Aggregation @@ -91,6 +89,10 @@ class SearchHitRowSet extends AbstractRowSet { } } } + + protected boolean isLimitReached() { + return cursor == Cursor.EMPTY; + } @Override public int columnCount() { @@ -166,10 +168,6 @@ public int size() { return size; } - public String scrollId() { - return scrollId; - } - @Override public Cursor nextPageCursor() { return cursor; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractor.java index e8e2db4f052b2..c799ab27dcab2 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractor.java @@ -5,16 +5,15 @@ */ package org.elasticsearch.xpack.sql.execution.search.extractor; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import org.elasticsearch.xpack.sql.querydsl.container.GroupByRef.Property; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; +import org.elasticsearch.xpack.sql.util.DateUtils; import java.io.IOException; +import java.time.ZoneId; import java.util.Map; import java.util.Objects; import java.util.TimeZone; @@ -29,6 +28,7 @@ public class CompositeKeyExtractor implements BucketExtractor { private final String key; private final Property property; private final TimeZone timeZone; + private final ZoneId zoneId; /** * Constructs a 
new CompositeKeyExtractor instance. @@ -38,40 +38,29 @@ public CompositeKeyExtractor(String key, Property property, TimeZone timeZone) { this.key = key; this.property = property; this.timeZone = timeZone; + this.zoneId = timeZone != null ? timeZone.toZoneId() : null; } CompositeKeyExtractor(StreamInput in) throws IOException { key = in.readString(); property = in.readEnum(Property.class); - if (in.getVersion().onOrAfter(Version.V_6_3_0)) { - if (in.readBoolean()) { - timeZone = TimeZone.getTimeZone(in.readString()); - } else { - timeZone = null; - } + if (in.readBoolean()) { + timeZone = TimeZone.getTimeZone(in.readString()); } else { - DateTimeZone dtz = in.readOptionalTimeZone(); - if (dtz == null) { - timeZone = null; - } else { - timeZone = dtz.toTimeZone(); - } + timeZone = null; } + this.zoneId = timeZone != null ? timeZone.toZoneId() : null; } @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(key); out.writeEnum(property); - if (out.getVersion().onOrAfter(Version.V_6_3_0)) { - if (timeZone == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - out.writeString(timeZone.getID()); - } + if (timeZone == null) { + out.writeBoolean(false); } else { - out.writeOptionalTimeZone(timeZone == null ? null : DateTimeZone.forTimeZone(timeZone)); + out.writeBoolean(true); + out.writeString(timeZone.getID()); } } @@ -110,7 +99,7 @@ public Object extract(Bucket bucket) { if (object == null) { return object; } else if (object instanceof Long) { - object = new DateTime(((Long) object).longValue(), DateTimeZone.forTimeZone(timeZone)); + object = DateUtils.of(((Long) object).longValue(), zoneId); } else { throw new SqlIllegalArgumentException("Invalid date key returned: {}", object); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java index 66e177530547f..3284efa54c8f4 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java @@ -13,9 +13,8 @@ import org.elasticsearch.search.SearchHit; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.util.DateUtils; import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; -import org.joda.time.ReadableDateTime; import java.io.IOException; import java.util.List; @@ -136,11 +135,16 @@ private Object unwrapMultiValue(Object values) { if (values instanceof Map) { throw new SqlIllegalArgumentException("Objects (returned by [{}]) are not supported", fieldName); } - if (values instanceof String && dataType == DataType.DATE) { - return new DateTime(Long.parseLong(values.toString()), DateTimeZone.UTC); + if (dataType == DataType.DATE) { + if (values instanceof String) { + return DateUtils.of(Long.parseLong(values.toString())); + } + // returned by nested types... 
+ if (values instanceof DateTime) { + return DateUtils.of((DateTime) values); + } } - if (values instanceof Long || values instanceof Double || values instanceof String || values instanceof Boolean - || values instanceof ReadableDateTime) { + if (values instanceof Long || values instanceof Double || values instanceof String || values instanceof Boolean) { return values; } throw new SqlIllegalArgumentException("Type {} (returned by [{}]) is not supported", values.getClass().getSimpleName(), fieldName); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeFunction.java index 130acd8eddcd3..cfee964b01e62 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeFunction.java @@ -12,19 +12,22 @@ import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.tree.NodeInfo; -import org.joda.time.DateTime; +import java.time.ZoneId; +import java.time.ZonedDateTime; import java.util.Objects; import java.util.TimeZone; abstract class BaseDateTimeFunction extends UnaryScalarFunction { private final TimeZone timeZone; + private final ZoneId zoneId; private final String name; BaseDateTimeFunction(Location location, Expression field, TimeZone timeZone) { super(location, field); this.timeZone = timeZone; + this.zoneId = timeZone != null ? timeZone.toZoneId() : null; StringBuilder sb = new StringBuilder(super.name()); // add timezone as last argument @@ -61,15 +64,15 @@ public boolean foldable() { @Override public Object fold() { - DateTime folded = (DateTime) field().fold(); + ZonedDateTime folded = (ZonedDateTime) field().fold(); if (folded == null) { return null; } - return doFold(folded.getMillis(), timeZone().getID()); + return doFold(folded.withZoneSameInstant(zoneId)); } - protected abstract Object doFold(long millis, String tzId); + protected abstract Object doFold(ZonedDateTime dateTime); @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeProcessor.java index c8f42704ac525..ce6bd1ad470aa 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeProcessor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeProcessor.java @@ -10,21 +10,25 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; -import org.joda.time.ReadableInstant; import java.io.IOException; +import java.time.ZoneId; +import java.time.ZonedDateTime; import java.util.TimeZone; public abstract class BaseDateTimeProcessor implements Processor { private final TimeZone timeZone; + private final ZoneId zoneId; BaseDateTimeProcessor(TimeZone timeZone) { this.timeZone = timeZone; + this.zoneId = timeZone.toZoneId(); } BaseDateTimeProcessor(StreamInput in) throws IOException { timeZone = 
TimeZone.getTimeZone(in.readString()); + zoneId = timeZone.toZoneId(); } @Override @@ -37,23 +41,17 @@ TimeZone timeZone() { } @Override - public Object process(Object l) { - if (l == null) { + public Object process(Object input) { + if (input == null) { return null; } - long millis; - if (l instanceof String) { - // 6.4+ - millis = Long.parseLong(l.toString()); - } else if (l instanceof ReadableInstant) { - // 6.3- - millis = ((ReadableInstant) l).getMillis(); - } else { - throw new SqlIllegalArgumentException("A string or a date is required; received {}", l); + + if (!(input instanceof ZonedDateTime)) { + throw new SqlIllegalArgumentException("A date is required; received {}", input); } - - return doProcess(millis); + + return doProcess(((ZonedDateTime) input).withZoneSameInstant(zoneId)); } - abstract Object doProcess(long millis); -} + abstract Object doProcess(ZonedDateTime dateTime); +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeFunction.java index 8d5a384b1f456..cad8265a03d65 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeFunction.java @@ -14,7 +14,6 @@ import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.type.DataType; -import java.time.Instant; import java.time.ZoneId; import java.time.ZonedDateTime; import java.time.temporal.ChronoField; @@ -24,23 +23,25 @@ public abstract class DateTimeFunction extends BaseDateTimeFunction { - DateTimeFunction(Location location, Expression field, TimeZone timeZone) { + private final DateTimeExtractor extractor; + + DateTimeFunction(Location location, Expression field, TimeZone timeZone, DateTimeExtractor extractor) { super(location, field, timeZone); + this.extractor = extractor; } @Override - protected Object doFold(long millis, String tzId) { - return dateTimeChrono(millis, tzId, chronoField().name()); + protected Object doFold(ZonedDateTime dateTime) { + return dateTimeChrono(dateTime, extractor.chronoField()); } - public static Integer dateTimeChrono(long millis, String tzId, String chronoName) { - ZonedDateTime time = ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), ZoneId.of(tzId)); - return Integer.valueOf(time.get(ChronoField.valueOf(chronoName))); + public static Integer dateTimeChrono(ZonedDateTime dateTime, String tzId, String chronoName) { + ZonedDateTime zdt = dateTime.withZoneSameInstant(ZoneId.of(tzId)); + return dateTimeChrono(zdt, ChronoField.valueOf(chronoName)); } - public static Integer dateTimeChrono(ZonedDateTime millis, String tzId, String chronoName) { - ZonedDateTime time = millis.withZoneSameInstant(ZoneId.of(tzId)); - return Integer.valueOf(time.get(ChronoField.valueOf(chronoName))); + private static Integer dateTimeChrono(ZonedDateTime dateTime, ChronoField field) { + return Integer.valueOf(dateTime.get(field)); } @Override @@ -51,21 +52,14 @@ public ScriptTemplate scriptWithField(FieldAttribute field) { template = formatTemplate("{sql}.dateTimeChrono(doc[{}].value, {}, {})"); params.variable(field.name()) .variable(timeZone().getID()) - .variable(chronoField().name()); + .variable(extractor.chronoField().name()); return new ScriptTemplate(template, params.build(), 
dataType()); } - /** - * Used for generating the painless script version of this function when the time zone is not UTC - */ - protected abstract ChronoField chronoField(); - - protected abstract DateTimeExtractor extractor(); - @Override protected Processor makeProcessor() { - return new DateTimeProcessor(extractor(), timeZone()); + return new DateTimeProcessor(extractor, timeZone()); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeHistogramFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeHistogramFunction.java index bb5aaea61fb3f..60d39e7ea30bc 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeHistogramFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeHistogramFunction.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor.DateTimeExtractor; import org.elasticsearch.xpack.sql.tree.Location; import java.util.TimeZone; @@ -16,8 +17,8 @@ */ public abstract class DateTimeHistogramFunction extends DateTimeFunction { - DateTimeHistogramFunction(Location location, Expression field, TimeZone timeZone) { - super(location, field, timeZone); + DateTimeHistogramFunction(Location location, Expression field, TimeZone timeZone, DateTimeExtractor extractor) { + super(location, field, timeZone, extractor); } /** diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessor.java index d34b1c1e39053..d1a19a5ba014a 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessor.java @@ -7,38 +7,40 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.joda.time.DateTime; -import org.joda.time.DateTimeFieldType; -import org.joda.time.DateTimeZone; -import org.joda.time.ReadableDateTime; import java.io.IOException; +import java.time.ZonedDateTime; +import java.time.temporal.ChronoField; import java.util.Objects; import java.util.TimeZone; public class DateTimeProcessor extends BaseDateTimeProcessor { public enum DateTimeExtractor { - DAY_OF_MONTH(DateTimeFieldType.dayOfMonth()), - DAY_OF_WEEK(DateTimeFieldType.dayOfWeek()), - DAY_OF_YEAR(DateTimeFieldType.dayOfYear()), - HOUR_OF_DAY(DateTimeFieldType.hourOfDay()), - MINUTE_OF_DAY(DateTimeFieldType.minuteOfDay()), - MINUTE_OF_HOUR(DateTimeFieldType.minuteOfHour()), - MONTH_OF_YEAR(DateTimeFieldType.monthOfYear()), - SECOND_OF_MINUTE(DateTimeFieldType.secondOfMinute()), - WEEK_OF_YEAR(DateTimeFieldType.weekOfWeekyear()), - YEAR(DateTimeFieldType.year()); - - private final DateTimeFieldType field; - - DateTimeExtractor(DateTimeFieldType field) { + DAY_OF_MONTH(ChronoField.DAY_OF_MONTH), + DAY_OF_WEEK(ChronoField.DAY_OF_WEEK), + DAY_OF_YEAR(ChronoField.DAY_OF_YEAR), + HOUR_OF_DAY(ChronoField.HOUR_OF_DAY), + MINUTE_OF_DAY(ChronoField.MINUTE_OF_DAY), + 
MINUTE_OF_HOUR(ChronoField.MINUTE_OF_HOUR), + MONTH_OF_YEAR(ChronoField.MONTH_OF_YEAR), + SECOND_OF_MINUTE(ChronoField.SECOND_OF_MINUTE), + WEEK_OF_YEAR(ChronoField.ALIGNED_WEEK_OF_YEAR), + YEAR(ChronoField.YEAR); + + private final ChronoField field; + + DateTimeExtractor(ChronoField field) { this.field = field; } - public int extract(ReadableDateTime dt) { + public int extract(ZonedDateTime dt) { return dt.get(field); } + + public ChronoField chronoField() { + return field; + } } public static final String NAME = "dt"; @@ -70,10 +72,8 @@ DateTimeExtractor extractor() { } @Override - public Object doProcess(long millis) { - ReadableDateTime dt = new DateTime(millis, DateTimeZone.forTimeZone(timeZone())); - - return extractor.extract(dt); + public Object doProcess(ZonedDateTime dateTime) { + return extractor.extract(dateTime); } @Override @@ -95,4 +95,4 @@ public boolean equals(Object obj) { public String toString() { return extractor.toString(); } -} +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfMonth.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfMonth.java index ebb576b4648e1..3c402ef2f4a8d 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfMonth.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfMonth.java @@ -10,7 +10,6 @@ import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.tree.NodeInfo.NodeCtor2; -import java.time.temporal.ChronoField; import java.util.TimeZone; /** @@ -18,7 +17,7 @@ */ public class DayOfMonth extends DateTimeFunction { public DayOfMonth(Location location, Expression field, TimeZone timeZone) { - super(location, field, timeZone); + super(location, field, timeZone, DateTimeExtractor.DAY_OF_MONTH); } @Override @@ -35,14 +34,4 @@ protected DayOfMonth replaceChild(Expression newChild) { public String dateTimeFormat() { return "d"; } - - @Override - protected ChronoField chronoField() { - return ChronoField.DAY_OF_MONTH; - } - - @Override - protected DateTimeExtractor extractor() { - return DateTimeExtractor.DAY_OF_MONTH; - } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfWeek.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfWeek.java index d840d4d71df0a..fbfd9c9861768 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfWeek.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfWeek.java @@ -10,7 +10,6 @@ import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.tree.NodeInfo.NodeCtor2; -import java.time.temporal.ChronoField; import java.util.TimeZone; /** @@ -18,7 +17,7 @@ */ public class DayOfWeek extends DateTimeFunction { public DayOfWeek(Location location, Expression field, TimeZone timeZone) { - super(location, field, timeZone); + super(location, field, timeZone, DateTimeExtractor.DAY_OF_WEEK); } @Override @@ -35,14 +34,4 @@ protected DayOfWeek replaceChild(Expression newChild) { public String dateTimeFormat() { return "e"; } - - @Override - protected ChronoField chronoField() { - return ChronoField.DAY_OF_WEEK; - } - - @Override - protected DateTimeExtractor extractor() { - return 
DateTimeExtractor.DAY_OF_WEEK; - } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfYear.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfYear.java index 1fa248d9c2063..a6b843bd0bd04 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfYear.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfYear.java @@ -11,7 +11,6 @@ import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.tree.NodeInfo.NodeCtor2; -import java.time.temporal.ChronoField; import java.util.TimeZone; /** @@ -19,7 +18,7 @@ */ public class DayOfYear extends DateTimeFunction { public DayOfYear(Location location, Expression field, TimeZone timeZone) { - super(location, field, timeZone); + super(location, field, timeZone, DateTimeExtractor.DAY_OF_YEAR); } @Override @@ -36,14 +35,4 @@ protected UnaryScalarFunction replaceChild(Expression newChild) { public String dateTimeFormat() { return "D"; } - - @Override - protected ChronoField chronoField() { - return ChronoField.DAY_OF_YEAR; - } - - @Override - protected DateTimeExtractor extractor() { - return DateTimeExtractor.DAY_OF_YEAR; - } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/HourOfDay.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/HourOfDay.java index 4df28bddad088..193a14c09327d 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/HourOfDay.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/HourOfDay.java @@ -10,7 +10,6 @@ import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.tree.NodeInfo.NodeCtor2; -import java.time.temporal.ChronoField; import java.util.TimeZone; /** @@ -18,7 +17,7 @@ */ public class HourOfDay extends DateTimeFunction { public HourOfDay(Location location, Expression field, TimeZone timeZone) { - super(location, field, timeZone); + super(location, field, timeZone, DateTimeExtractor.HOUR_OF_DAY); } @Override @@ -35,14 +34,4 @@ protected HourOfDay replaceChild(Expression newChild) { public String dateTimeFormat() { return "hour"; } - - @Override - protected ChronoField chronoField() { - return ChronoField.HOUR_OF_DAY; - } - - @Override - protected DateTimeExtractor extractor() { - return DateTimeExtractor.HOUR_OF_DAY; - } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MinuteOfDay.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MinuteOfDay.java index ef0fb0bce18aa..25ef41a18cac8 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MinuteOfDay.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MinuteOfDay.java @@ -10,7 +10,6 @@ import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.tree.NodeInfo.NodeCtor2; -import java.time.temporal.ChronoField; import java.util.TimeZone; /** @@ -19,7 +18,7 @@ public class MinuteOfDay extends DateTimeFunction { public MinuteOfDay(Location location, Expression field, TimeZone timeZone) { - super(location, field, timeZone); + super(location, 
field, timeZone, DateTimeExtractor.MINUTE_OF_DAY); } @Override @@ -36,14 +35,4 @@ protected MinuteOfDay replaceChild(Expression newChild) { public String dateTimeFormat() { throw new UnsupportedOperationException("is there a format for it?"); } - - @Override - protected ChronoField chronoField() { - return ChronoField.MINUTE_OF_DAY; - } - - @Override - protected DateTimeExtractor extractor() { - return DateTimeExtractor.MINUTE_OF_DAY; - } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MinuteOfHour.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MinuteOfHour.java index f5ab095ef2455..798b700723724 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MinuteOfHour.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MinuteOfHour.java @@ -10,7 +10,6 @@ import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.tree.NodeInfo.NodeCtor2; -import java.time.temporal.ChronoField; import java.util.TimeZone; /** @@ -18,7 +17,7 @@ */ public class MinuteOfHour extends DateTimeFunction { public MinuteOfHour(Location location, Expression field, TimeZone timeZone) { - super(location, field, timeZone); + super(location, field, timeZone, DateTimeExtractor.MINUTE_OF_HOUR); } @Override @@ -35,14 +34,4 @@ protected MinuteOfHour replaceChild(Expression newChild) { public String dateTimeFormat() { return "m"; } - - @Override - protected ChronoField chronoField() { - return ChronoField.MINUTE_OF_HOUR; - } - - @Override - protected DateTimeExtractor extractor() { - return DateTimeExtractor.MINUTE_OF_HOUR; - } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MonthOfYear.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MonthOfYear.java index 503a771611e7d..9231987b5add2 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MonthOfYear.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MonthOfYear.java @@ -10,7 +10,6 @@ import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.tree.NodeInfo.NodeCtor2; -import java.time.temporal.ChronoField; import java.util.TimeZone; /** @@ -18,7 +17,7 @@ */ public class MonthOfYear extends DateTimeFunction { public MonthOfYear(Location location, Expression field, TimeZone timeZone) { - super(location, field, timeZone); + super(location, field, timeZone, DateTimeExtractor.MONTH_OF_YEAR); } @Override @@ -35,14 +34,4 @@ protected MonthOfYear replaceChild(Expression newChild) { public String dateTimeFormat() { return "M"; } - - @Override - protected ChronoField chronoField() { - return ChronoField.MONTH_OF_YEAR; - } - - @Override - protected DateTimeExtractor extractor() { - return DateTimeExtractor.MONTH_OF_YEAR; - } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeFunction.java index a8e6e02057a22..4ec42def0ebf3 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeFunction.java +++ 
b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeFunction.java @@ -14,6 +14,7 @@ import org.elasticsearch.xpack.sql.type.DataType; import org.elasticsearch.xpack.sql.util.StringUtils; +import java.time.ZonedDateTime; import java.util.Locale; import java.util.TimeZone; @@ -33,8 +34,8 @@ abstract class NamedDateTimeFunction extends BaseDateTimeFunction { } @Override - protected Object doFold(long millis, String tzId) { - return nameExtractor.extract(millis, tzId); + protected Object doFold(ZonedDateTime dateTime) { + return nameExtractor.extract(dateTime); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessor.java index 50eac88ae2c44..a0707d2a65e5f 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessor.java @@ -9,7 +9,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; -import java.time.Instant; import java.time.ZoneId; import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; @@ -31,8 +30,8 @@ public enum NameExtractor { this.apply = apply; } - public final String extract(Long millis, String tzId) { - return extract(ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), ZoneId.of(tzId)), tzId); + public final String extract(ZonedDateTime dateTime) { + return apply.apply(dateTime); } public final String extract(ZonedDateTime millis, String tzId) { @@ -73,8 +72,8 @@ NameExtractor extractor() { } @Override - public Object doProcess(long millis) { - return extractor.extract(millis, timeZone().getID()); + public Object doProcess(ZonedDateTime dateTime) { + return extractor.extract(dateTime); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/Quarter.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/Quarter.java index 51b9501c6eb00..4da5c94626e27 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/Quarter.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/Quarter.java @@ -14,6 +14,7 @@ import org.elasticsearch.xpack.sql.tree.NodeInfo.NodeCtor2; import org.elasticsearch.xpack.sql.type.DataType; +import java.time.ZonedDateTime; import java.util.TimeZone; import static org.elasticsearch.xpack.sql.expression.function.scalar.datetime.QuarterProcessor.quarter; @@ -26,8 +27,8 @@ public Quarter(Location location, Expression field, TimeZone timeZone) { } @Override - protected Object doFold(long millis, String tzId) { - return quarter(millis, tzId); + protected Object doFold(ZonedDateTime dateTime) { + return quarter(dateTime); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/QuarterProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/QuarterProcessor.java index c4d6864b27574..d2a20de84d303 100644 --- 
a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/QuarterProcessor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/QuarterProcessor.java @@ -9,7 +9,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import java.io.IOException; -import java.time.Instant; import java.time.ZoneId; import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; @@ -36,17 +35,16 @@ public String getWriteableName() { } @Override - public Object doProcess(long millis) { - return quarter(millis, timeZone().getID()); + public Object doProcess(ZonedDateTime zdt) { + return quarter(zdt); } - public static Integer quarter(long millis, String tzId) { - return quarter(ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), ZoneId.of(tzId)), tzId); + public static Integer quarter(ZonedDateTime dateTime, String tzId) { + return quarter(dateTime.withZoneSameInstant(ZoneId.of(tzId))); } - public static Integer quarter(ZonedDateTime zdt, String tzId) { - ZonedDateTime time = zdt.withZoneSameInstant(ZoneId.of(tzId)); - return Integer.valueOf(time.format(QUARTER_FORMAT)); + static Integer quarter(ZonedDateTime zdt) { + return Integer.valueOf(zdt.format(QUARTER_FORMAT)); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/SecondOfMinute.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/SecondOfMinute.java index 3522eb10ffe80..3702c4beb3f6f 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/SecondOfMinute.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/SecondOfMinute.java @@ -10,7 +10,6 @@ import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.tree.NodeInfo.NodeCtor2; -import java.time.temporal.ChronoField; import java.util.TimeZone; /** @@ -18,7 +17,7 @@ */ public class SecondOfMinute extends DateTimeFunction { public SecondOfMinute(Location location, Expression field, TimeZone timeZone) { - super(location, field, timeZone); + super(location, field, timeZone, DateTimeExtractor.SECOND_OF_MINUTE); } @Override @@ -35,14 +34,4 @@ protected SecondOfMinute replaceChild(Expression newChild) { public String dateTimeFormat() { return "s"; } - - @Override - protected ChronoField chronoField() { - return ChronoField.SECOND_OF_MINUTE; - } - - @Override - protected DateTimeExtractor extractor() { - return DateTimeExtractor.SECOND_OF_MINUTE; - } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/WeekOfYear.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/WeekOfYear.java index 59948165f71cb..8a31ffe36eec8 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/WeekOfYear.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/WeekOfYear.java @@ -10,7 +10,6 @@ import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.tree.NodeInfo.NodeCtor2; -import java.time.temporal.ChronoField; import java.util.TimeZone; /** @@ -18,7 +17,7 @@ */ public class WeekOfYear extends DateTimeFunction { public WeekOfYear(Location location, Expression field, TimeZone timeZone) { - super(location, field, timeZone); + super(location, field, 
timeZone, DateTimeExtractor.WEEK_OF_YEAR); } @Override @@ -35,14 +34,4 @@ protected WeekOfYear replaceChild(Expression newChild) { public String dateTimeFormat() { return "w"; } - - @Override - protected ChronoField chronoField() { - return ChronoField.ALIGNED_WEEK_OF_YEAR; - } - - @Override - protected DateTimeExtractor extractor() { - return DateTimeExtractor.WEEK_OF_YEAR; - } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/Year.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/Year.java index 2b065329be305..2eb08c7dd93b8 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/Year.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/Year.java @@ -10,7 +10,6 @@ import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.tree.NodeInfo.NodeCtor2; -import java.time.temporal.ChronoField; import java.util.TimeZone; /** @@ -18,7 +17,7 @@ */ public class Year extends DateTimeHistogramFunction { public Year(Location location, Expression field, TimeZone timeZone) { - super(location, field, timeZone); + super(location, field, timeZone, DateTimeExtractor.YEAR); } @Override @@ -41,16 +40,6 @@ public Expression orderBy() { return field(); } - @Override - protected ChronoField chronoField() { - return ChronoField.YEAR; - } - - @Override - protected DateTimeExtractor extractor() { - return DateTimeExtractor.YEAR; - } - @Override public String interval() { return "year"; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Arithmetics.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Arithmetics.java index 07fcef391681e..bec35eb449ca9 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Arithmetics.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Arithmetics.java @@ -5,6 +5,10 @@ */ package org.elasticsearch.xpack.sql.expression.predicate.operator.arithmetic; +import java.time.Duration; +import java.time.Period; +import java.time.ZonedDateTime; + /** * Arithmetic operation using the type widening rules of the JLS 5.6.2 namely * widen to double or float or long or int in this order. 
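For context on the Arithmetics.java hunks below: the overloads they add delegate entirely to java.time arithmetic and propagate null, mirroring the null checks already present in the Number variants. A minimal standalone sketch of that behavior (the class and method names here are illustrative only, not part of this change):

import java.time.Duration;
import java.time.Period;
import java.time.ZonedDateTime;

public class DateArithmeticSketch {

    // date + year-month interval; null if either operand is null
    static ZonedDateTime add(ZonedDateTime l, Period r) {
        return (l == null || r == null) ? null : l.plus(r);
    }

    // date - day-time interval; null if either operand is null
    static ZonedDateTime sub(ZonedDateTime l, Duration r) {
        return (l == null || r == null) ? null : l.minus(r);
    }

    public static void main(String[] args) {
        ZonedDateTime dt = ZonedDateTime.parse("2018-11-20T10:15:30Z");
        System.out.println(add(dt, Period.ofMonths(2)));   // 2019-01-20T10:15:30Z
        System.out.println(sub(dt, Duration.ofHours(11))); // 2018-11-19T23:15:30Z
        System.out.println(add(null, Period.ofDays(1)));   // null
    }
}

Period models year-month intervals and Duration models day-time intervals, which is why each ZonedDateTime overload in the hunks below comes in both flavors.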
@@ -29,6 +33,38 @@ static Number add(Number l, Number r) { return Integer.valueOf(Math.addExact(l.intValue(), r.intValue())); } + static Period add(Period l, Period r) { + if (l == null || r == null) { + return null; + } + + return l.plus(r); + } + + static Duration add(Duration l, Duration r) { + if (l == null || r == null) { + return null; + } + + return l.plus(r); + } + + static ZonedDateTime add(ZonedDateTime l, Period r) { + if (l == null || r == null) { + return null; + } + + return l.plus(r); + } + + static ZonedDateTime add(ZonedDateTime l, Duration r) { + if (l == null || r == null) { + return null; + } + + return l.plus(r); + } + static Number sub(Number l, Number r) { if (l == null || r == null) { return null; @@ -47,6 +83,38 @@ static Number sub(Number l, Number r) { return Integer.valueOf(Math.subtractExact(l.intValue(), r.intValue())); } + static Period sub(Period l, Period r) { + if (l == null || r == null) { + return null; + } + + return l.minus(r); + } + + static Duration sub(Duration l, Duration r) { + if (l == null || r == null) { + return null; + } + + return l.minus(r); + } + + static ZonedDateTime sub(ZonedDateTime l, Period r) { + if (l == null || r == null) { + return null; + } + + return l.minus(r); + } + + static ZonedDateTime sub(ZonedDateTime l, Duration r) { + if (l == null || r == null) { + return null; + } + + return l.minus(r); + } + static Number mul(Number l, Number r) { if (l == null || r == null) { return null; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java index fc3d023c9b492..b2e8e0c02b0ef 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java @@ -24,7 +24,6 @@ import org.elasticsearch.xpack.sql.expression.function.Function; import org.elasticsearch.xpack.sql.expression.function.UnresolvedFunction; import org.elasticsearch.xpack.sql.expression.function.scalar.Cast; -import org.elasticsearch.xpack.sql.expression.predicate.nulls.IsNull; import org.elasticsearch.xpack.sql.expression.predicate.Range; import org.elasticsearch.xpack.sql.expression.predicate.fulltext.MatchQueryPredicate; import org.elasticsearch.xpack.sql.expression.predicate.fulltext.MultiMatchQueryPredicate; @@ -33,6 +32,7 @@ import org.elasticsearch.xpack.sql.expression.predicate.logical.Not; import org.elasticsearch.xpack.sql.expression.predicate.logical.Or; import org.elasticsearch.xpack.sql.expression.predicate.nulls.IsNotNull; +import org.elasticsearch.xpack.sql.expression.predicate.nulls.IsNull; import org.elasticsearch.xpack.sql.expression.predicate.operator.arithmetic.Add; import org.elasticsearch.xpack.sql.expression.predicate.operator.arithmetic.Div; import org.elasticsearch.xpack.sql.expression.predicate.operator.arithmetic.Mod; @@ -94,6 +94,7 @@ import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.type.DataType; import org.elasticsearch.xpack.sql.type.DataTypes; +import org.elasticsearch.xpack.sql.util.DateUtils; import org.joda.time.DateTime; import org.joda.time.format.DateTimeFormatter; import org.joda.time.format.DateTimeFormatterBuilder; @@ -631,7 +632,7 @@ public Literal visitDateEscapedLiteral(DateEscapedLiteralContext ctx) { } catch(IllegalArgumentException ex) { throw new ParsingException(loc, "Invalid date received; {}", ex.getMessage()); } - return 
new Literal(loc, dt, DataType.DATE); + return new Literal(loc, DateUtils.of(dt), DataType.DATE); } @Override @@ -667,7 +668,7 @@ public Literal visitTimestampEscapedLiteral(TimestampEscapedLiteralContext ctx) } catch (IllegalArgumentException ex) { throw new ParsingException(loc, "Invalid timestamp received; {}", ex.getMessage()); } - return new Literal(loc, dt, DataType.DATE); + return new Literal(loc, DateUtils.of(dt), DataType.DATE); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java index de8798ecf544b..34c0f1c6d74f7 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java @@ -7,13 +7,15 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.xpack.sql.action.SqlQueryResponse; import org.elasticsearch.xpack.sql.action.CliFormatter; +import org.elasticsearch.xpack.sql.action.SqlQueryResponse; import org.elasticsearch.xpack.sql.proto.ColumnInfo; import org.elasticsearch.xpack.sql.session.Cursor; import org.elasticsearch.xpack.sql.session.Cursors; +import org.elasticsearch.xpack.sql.util.DateUtils; import org.elasticsearch.xpack.sql.util.StringUtils; +import java.time.ZonedDateTime; import java.util.List; import java.util.Locale; import java.util.Objects; @@ -225,7 +227,7 @@ String format(Cursor cursor, RestRequest request, SqlQueryResponse response) { } for (List row : response.rows()) { - row(sb, row, f -> Objects.toString(f, StringUtils.EMPTY)); + row(sb, row, f -> f instanceof ZonedDateTime ? DateUtils.toString((ZonedDateTime) f) : Objects.toString(f, StringUtils.EMPTY)); } return sb.toString(); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java index 26436c614f565..8c21edcee349d 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java @@ -8,12 +8,9 @@ import org.elasticsearch.common.Booleans; import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; -import org.joda.time.ReadableInstant; -import org.joda.time.format.DateTimeFormatter; -import org.joda.time.format.ISODateTimeFormat; +import org.elasticsearch.xpack.sql.util.DateUtils; +import java.time.ZonedDateTime; import java.util.Locale; import java.util.function.DoubleFunction; import java.util.function.Function; @@ -32,8 +29,6 @@ */ public abstract class DataTypeConversion { - private static final DateTimeFormatter UTC_DATE_FORMATTER = ISODateTimeFormat.dateOptionalTimeParser().withZoneUTC(); - /** * Returns the type compatible with both left and right types *

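The DataTypeConversion hunks that follow route DATE conversions through the new java.time-based DateUtils. The invariant worth noting is that the numeric side of a DATE conversion is millis since epoch in UTC, so date-to-number and number-to-date invert each other. A small sketch of that round trip (helper names are hypothetical, not from this diff):

import java.time.Instant;
import java.time.ZoneId;
import java.time.ZonedDateTime;

public class DateConversionSketch {

    static final ZoneId UTC = ZoneId.of("UTC");

    // numeric -> DATE: the long is interpreted as millis since epoch, in UTC
    static ZonedDateTime toDate(long millis) {
        return ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), UTC);
    }

    // DATE -> numeric: emit millis so toDate(fromDate(d)).equals(d) holds
    static long fromDate(ZonedDateTime date) {
        return date.toInstant().toEpochMilli();
    }

    public static void main(String[] args) {
        ZonedDateTime d = toDate(1_483_228_800_000L);
        System.out.println(d);            // 2017-01-01T00:00Z[UTC]
        System.out.println(fromDate(d));  // 1483228800000
    }
}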
@@ -374,7 +369,7 @@ public enum Conversion { IDENTITY(Function.identity()), NULL(value -> null), - DATE_TO_STRING(Object::toString), + DATE_TO_STRING(o -> DateUtils.toString((ZonedDateTime) o)), OTHER_TO_STRING(String::valueOf), RATIONAL_TO_LONG(fromDouble(DataTypeConversion::safeToLong)), @@ -416,7 +411,7 @@ public enum Conversion { RATIONAL_TO_DATE(toDate(RATIONAL_TO_LONG)), INTEGER_TO_DATE(toDate(INTEGER_TO_LONG)), BOOL_TO_DATE(toDate(BOOL_TO_INT)), - STRING_TO_DATE(fromString(UTC_DATE_FORMATTER::parseDateTime, "Date")), + STRING_TO_DATE(fromString(DateUtils::of, "Date")), NUMERIC_TO_BOOLEAN(fromLong(value -> value != 0)), STRING_TO_BOOLEAN(fromString(DataTypeConversion::convertToBoolean, "Boolean")), @@ -462,11 +457,11 @@ private static Function fromBool(Function conve } private static Function fromDate(Function converter) { - return l -> ((ReadableInstant) l).getMillis(); + return l -> ((ZonedDateTime) l).toInstant().toEpochMilli(); } private static Function toDate(Conversion conversion) { - return l -> new DateTime(((Number) conversion.convert(l)).longValue(), DateTimeZone.UTC); + return l -> DateUtils.of(((Number) conversion.convert(l)).longValue()); } public Object convert(Object l) { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java index 92bc6f33a5de5..91de6297b9449 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java @@ -6,7 +6,9 @@ package org.elasticsearch.xpack.sql.type; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; -import org.joda.time.DateTime; + +import java.time.OffsetDateTime; +import java.time.ZonedDateTime; public final class DataTypes { @@ -45,7 +47,7 @@ public static DataType fromJava(Object value) { if (value instanceof Short) { return DataType.SHORT; } - if (value instanceof DateTime) { + if (value instanceof ZonedDateTime || value instanceof OffsetDateTime) { return DataType.DATE; } if (value instanceof String || value instanceof Character) { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/DateUtils.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/DateUtils.java new file mode 100644 index 0000000000000..b59b158d360be --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/DateUtils.java @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.util; + +import org.joda.time.DateTime; +import org.joda.time.format.DateTimeFormatter; +import org.joda.time.format.ISODateTimeFormat; + +import java.time.Instant; +import java.time.LocalDateTime; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; + +public final class DateUtils { + + // TODO: do we have a java.time based parser we can use instead? + private static final DateTimeFormatter UTC_DATE_FORMATTER = ISODateTimeFormat.dateOptionalTimeParser().withZoneUTC(); + + public static final ZoneId UTC = ZoneId.of("UTC"); + + private DateUtils() {} + + /** + * Creates a date from the millis since epoch (thus the time-zone is UTC).
+ */ + public static ZonedDateTime of(long millis) { + return ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), UTC); + } + + /** + * Creates a date from the millis since epoch, then translates the date into the given timezone. + */ + public static ZonedDateTime of(long millis, ZoneId id) { + return ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), id); + } + + /** + * Parses the given string into a ZonedDateTime using UTC as the default time zone. + */ + public static ZonedDateTime of(String dateFormat) { + return of(UTC_DATE_FORMATTER.parseDateTime(dateFormat)); + } + + public static ZonedDateTime of(DateTime dateTime) { + LocalDateTime ldt = LocalDateTime.of( + dateTime.getYear(), + dateTime.getMonthOfYear(), + dateTime.getDayOfMonth(), + dateTime.getHourOfDay(), + dateTime.getMinuteOfHour(), + dateTime.getSecondOfMinute(), + dateTime.getMillisOfSecond() * 1_000_000); + + return ZonedDateTime.ofStrict(ldt, + ZoneOffset.ofTotalSeconds(dateTime.getZone().getOffset(dateTime) / 1000), + org.elasticsearch.common.time.DateUtils.dateTimeZoneToZoneId(dateTime.getZone())); + } + + public static String toString(ZonedDateTime dateTime) { + return org.elasticsearch.xpack.sql.proto.DateUtils.toString(dateTime); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractorTests.java index 11068372bcc8a..c0125a365aac8 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractorTests.java @@ -11,8 +11,7 @@ import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import org.elasticsearch.xpack.sql.querydsl.container.GroupByRef.Property; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; +import org.elasticsearch.xpack.sql.util.DateUtils; import java.io.IOException; import java.util.TimeZone; @@ -63,7 +62,7 @@ public void testExtractDate() { long millis = System.currentTimeMillis(); Bucket bucket = new TestBucket(singletonMap(extractor.key(), millis), randomLong(), new Aggregations(emptyList())); - assertEquals(new DateTime(millis, DateTimeZone.forTimeZone(extractor.timeZone())), extractor.extract(bucket)); + assertEquals(DateUtils.of(millis, extractor.timeZone().toZoneId()), extractor.extract(bucket)); } public void testExtractIncorrectDateKey() { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java index 9aa0c9f7b36c2..5c3478eaea343 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java @@ -15,8 +15,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.SqlException; import org.elasticsearch.xpack.sql.type.DataType; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; +import org.elasticsearch.xpack.sql.util.DateUtils; import java.io.IOException; import java.util.ArrayList; @@ -144,7 +143,7 @@ public
void testGetDate() { DocumentField field = new DocumentField("my_date_field", documentFieldValues); hit.fields(singletonMap("my_date_field", field)); FieldHitExtractor extractor = new FieldHitExtractor("my_date_field", DataType.DATE, true); - assertEquals(new DateTime(millis, DateTimeZone.UTC), extractor.extract(hit)); + assertEquals(DateUtils.of(millis), extractor.extract(hit)); } public void testGetSource() throws IOException { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessorTests.java index 8b0af5e968137..30c5fa6cb4e5a 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessorTests.java @@ -8,12 +8,12 @@ import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor.DateTimeExtractor; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; import java.io.IOException; import java.util.TimeZone; +import static org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeTestUtils.dateTime; + public class DateTimeProcessorTests extends AbstractWireSerializingTestCase { private static final TimeZone UTC = TimeZone.getTimeZone("UTC"); @@ -39,12 +39,12 @@ protected DateTimeProcessor mutateInstance(DateTimeProcessor instance) throws IO public void testApply() { DateTimeProcessor proc = new DateTimeProcessor(DateTimeExtractor.YEAR, UTC); - assertEquals(1970, proc.process(new DateTime(0L, DateTimeZone.UTC))); - assertEquals(2017, proc.process(new DateTime(2017, 01, 02, 10, 10, DateTimeZone.UTC))); + assertEquals(1970, proc.process(dateTime(0L))); + assertEquals(2017, proc.process(dateTime(2017, 01, 02, 10, 10))); proc = new DateTimeProcessor(DateTimeExtractor.DAY_OF_MONTH, UTC); - assertEquals(1, proc.process(new DateTime(0L, DateTimeZone.UTC))); - assertEquals(2, proc.process(new DateTime(2017, 01, 02, 10, 10, DateTimeZone.UTC))); - assertEquals(31, proc.process(new DateTime(2017, 01, 31, 10, 10, DateTimeZone.UTC))); + assertEquals(1, proc.process(dateTime(0L))); + assertEquals(2, proc.process(dateTime(2017, 01, 02, 10, 10))); + assertEquals(31, proc.process(dateTime(2017, 01, 31, 10, 10))); } } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeTestUtils.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeTestUtils.java new file mode 100644 index 0000000000000..164fe1fe931a4 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeTestUtils.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.xpack.sql.util.DateUtils; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +import java.time.ZonedDateTime; + +import static org.junit.Assert.assertEquals; + +public class DateTimeTestUtils { + + private DateTimeTestUtils() {} + + public static ZonedDateTime dateTime(int year, int month, int day, int hour, int minute) { + DateTime dateTime = new DateTime(year, month, day, hour, minute, DateTimeZone.UTC); + ZonedDateTime zdt = ZonedDateTime.of(year, month, day, hour, minute, 0, 0, DateUtils.UTC); + assertEquals(dateTime.getMillis() / 1000, zdt.toEpochSecond()); + return zdt; + } + + public static ZonedDateTime dateTime(long millisSinceEpoch) { + return DateUtils.of(millisSinceEpoch); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfYearTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfYearTests.java index 0bd54bd738239..c134446a2c340 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfYearTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfYearTests.java @@ -8,11 +8,11 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.expression.Literal; import org.elasticsearch.xpack.sql.type.DataType; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; import java.util.TimeZone; +import static org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeTestUtils.dateTime; + public class DayOfYearTests extends ESTestCase { private static final TimeZone UTC = TimeZone.getTimeZone("UTC"); @@ -22,10 +22,6 @@ public void testAsColumnProcessor() { assertEquals(365, extract(dateTime(0), TimeZone.getTimeZone("GMT-01:00"))); } - private DateTime dateTime(long millisSinceEpoch) { - return new DateTime(millisSinceEpoch, DateTimeZone.forTimeZone(UTC)); - } - private Object extract(Object value, TimeZone timeZone) { return build(value, timeZone).asPipe().asProcessor().process(value); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessorTests.java index 0f12ae05f86d4..379cf5f7e090f 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessorTests.java @@ -10,13 +10,13 @@ import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.NamedDateTimeProcessor.NameExtractor; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; import org.junit.Assume; import java.io.IOException; import java.util.TimeZone; +import static org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeTestUtils.dateTime; + public class NamedDateTimeProcessorTests extends AbstractWireSerializingTestCase { private static final TimeZone UTC = TimeZone.getTimeZone("UTC"); @@ -44,55 +44,55 @@ protected NamedDateTimeProcessor 
mutateInstance(NamedDateTimeProcessor instance) public void testValidDayNamesInUTC() { assumeJava9PlusAndCompatLocaleProviderSetting(); NamedDateTimeProcessor proc = new NamedDateTimeProcessor(NameExtractor.DAY_NAME, UTC); - assertEquals("Thursday", proc.process("0")); - assertEquals("Saturday", proc.process("-64164233612338")); - assertEquals("Monday", proc.process("64164233612338")); + assertEquals("Thursday", proc.process(dateTime(0L))); + assertEquals("Saturday", proc.process(dateTime(-64164233612338L))); + assertEquals("Monday", proc.process(dateTime(64164233612338L))); - assertEquals("Thursday", proc.process(new DateTime(0L, DateTimeZone.UTC))); - assertEquals("Thursday", proc.process(new DateTime(-5400, 12, 25, 2, 0, DateTimeZone.UTC))); - assertEquals("Friday", proc.process(new DateTime(30, 2, 1, 12, 13, DateTimeZone.UTC))); - assertEquals("Tuesday", proc.process(new DateTime(10902, 8, 22, 11, 11, DateTimeZone.UTC))); + assertEquals("Thursday", proc.process(dateTime(0L))); + assertEquals("Thursday", proc.process(dateTime(-5400, 12, 25, 2, 0))); + assertEquals("Friday", proc.process(dateTime(30, 2, 1, 12, 13))); + assertEquals("Tuesday", proc.process(dateTime(10902, 8, 22, 11, 11))); } public void testValidDayNamesWithNonUTCTimeZone() { assumeJava9PlusAndCompatLocaleProviderSetting(); NamedDateTimeProcessor proc = new NamedDateTimeProcessor(NameExtractor.DAY_NAME, TimeZone.getTimeZone("GMT-10:00")); - assertEquals("Wednesday", proc.process("0")); - assertEquals("Friday", proc.process("-64164233612338")); - assertEquals("Monday", proc.process("64164233612338")); + assertEquals("Wednesday", proc.process(dateTime(0))); + assertEquals("Friday", proc.process(dateTime(-64164233612338L))); + assertEquals("Monday", proc.process(dateTime(64164233612338L))); - assertEquals("Wednesday", proc.process(new DateTime(0L, DateTimeZone.UTC))); - assertEquals("Wednesday", proc.process(new DateTime(-5400, 12, 25, 2, 0, DateTimeZone.UTC))); - assertEquals("Friday", proc.process(new DateTime(30, 2, 1, 12, 13, DateTimeZone.UTC))); - assertEquals("Tuesday", proc.process(new DateTime(10902, 8, 22, 11, 11, DateTimeZone.UTC))); - assertEquals("Monday", proc.process(new DateTime(10902, 8, 22, 9, 59, DateTimeZone.UTC))); + assertEquals("Wednesday", proc.process(dateTime(0L))); + assertEquals("Wednesday", proc.process(dateTime(-5400, 12, 25, 2, 0))); + assertEquals("Friday", proc.process(dateTime(30, 2, 1, 12, 13))); + assertEquals("Tuesday", proc.process(dateTime(10902, 8, 22, 11, 11))); + assertEquals("Monday", proc.process(dateTime(10902, 8, 22, 9, 59))); } public void testValidMonthNamesInUTC() { assumeJava9PlusAndCompatLocaleProviderSetting(); NamedDateTimeProcessor proc = new NamedDateTimeProcessor(NameExtractor.MONTH_NAME, UTC); - assertEquals("January", proc.process("0")); - assertEquals("September", proc.process("-64164233612338")); - assertEquals("April", proc.process("64164233612338")); + assertEquals("January", proc.process(dateTime(0))); + assertEquals("September", proc.process(dateTime(-64165813612338L))); + assertEquals("April", proc.process(dateTime(64164233612338L))); - assertEquals("January", proc.process(new DateTime(0L, DateTimeZone.UTC))); - assertEquals("December", proc.process(new DateTime(-5400, 12, 25, 10, 10, DateTimeZone.UTC))); - assertEquals("February", proc.process(new DateTime(30, 2, 1, 12, 13, DateTimeZone.UTC))); - assertEquals("August", proc.process(new DateTime(10902, 8, 22, 11, 11, DateTimeZone.UTC))); + assertEquals("January", proc.process(dateTime(0L))); + 
assertEquals("December", proc.process(dateTime(-5400, 12, 25, 10, 10))); + assertEquals("February", proc.process(dateTime(30, 2, 1, 12, 13))); + assertEquals("August", proc.process(dateTime(10902, 8, 22, 11, 11))); } public void testValidMonthNamesWithNonUTCTimeZone() { assumeJava9PlusAndCompatLocaleProviderSetting(); NamedDateTimeProcessor proc = new NamedDateTimeProcessor(NameExtractor.MONTH_NAME, TimeZone.getTimeZone("GMT-3:00")); - assertEquals("December", proc.process("0")); - assertEquals("August", proc.process("-64165813612338")); // GMT: Tuesday, September 1, -0064 2:53:07.662 AM - assertEquals("April", proc.process("64164233612338")); // GMT: Monday, April 14, 4003 2:13:32.338 PM + assertEquals("December", proc.process(dateTime(0))); + assertEquals("August", proc.process(dateTime(-64165813612338L))); // GMT: Tuesday, September 1, -0064 2:53:07.662 AM + assertEquals("April", proc.process(dateTime(64164233612338L))); // GMT: Monday, April 14, 4003 2:13:32.338 PM - assertEquals("December", proc.process(new DateTime(0L, DateTimeZone.UTC))); - assertEquals("November", proc.process(new DateTime(-5400, 12, 1, 1, 1, DateTimeZone.UTC))); - assertEquals("February", proc.process(new DateTime(30, 2, 1, 12, 13, DateTimeZone.UTC))); - assertEquals("July", proc.process(new DateTime(10902, 8, 1, 2, 59, DateTimeZone.UTC))); - assertEquals("August", proc.process(new DateTime(10902, 8, 1, 3, 00, DateTimeZone.UTC))); + assertEquals("December", proc.process(dateTime(0L))); + assertEquals("November", proc.process(dateTime(-5400, 12, 1, 1, 1))); + assertEquals("February", proc.process(dateTime(30, 2, 1, 12, 13))); + assertEquals("July", proc.process(dateTime(10902, 8, 1, 2, 59))); + assertEquals("August", proc.process(dateTime(10902, 8, 1, 3, 00))); } /* @@ -109,8 +109,8 @@ private void assumeJava9PlusAndCompatLocaleProviderSetting() { } String beforeJava9CompatibleLocale = System.getProperty("java.locale.providers"); // and COMPAT setting needs to be first on the list - boolean isBeforeJava9Compatible = beforeJava9CompatibleLocale != null + boolean isBeforeJava9Compatible = beforeJava9CompatibleLocale != null && Strings.tokenizeToStringArray(beforeJava9CompatibleLocale, ",")[0].equals("COMPAT"); Assume.assumeTrue(isBeforeJava9Compatible); } -} +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/QuarterProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/QuarterProcessorTests.java index 7747bb8cae4ed..29e5d31db2172 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/QuarterProcessorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/QuarterProcessorTests.java @@ -7,11 +7,11 @@ package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; import org.elasticsearch.test.ESTestCase; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; import java.util.TimeZone; +import static org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeTestUtils.dateTime; + public class QuarterProcessorTests extends ESTestCase { private static final TimeZone UTC = TimeZone.getTimeZone("UTC"); @@ -19,28 +19,28 @@ public class QuarterProcessorTests extends ESTestCase { public void testQuarterWithUTCTimezone() { QuarterProcessor proc = new QuarterProcessor(UTC); - assertEquals(1, proc.process(new DateTime(0L, 
DateTimeZone.UTC))); - assertEquals(4, proc.process(new DateTime(-5400, 12, 25, 10, 10, DateTimeZone.UTC))); - assertEquals(1, proc.process(new DateTime(30, 2, 1, 12, 13, DateTimeZone.UTC))); - assertEquals(3, proc.process(new DateTime(10902, 8, 22, 11, 11, DateTimeZone.UTC))); + assertEquals(1, proc.process(dateTime(0L))); + assertEquals(4, proc.process(dateTime(-5400, 12, 25, 10, 10))); + assertEquals(1, proc.process(dateTime(30, 2, 1, 12, 13))); + assertEquals(3, proc.process(dateTime(10902, 8, 22, 11, 11))); - assertEquals(1, proc.process("0")); - assertEquals(3, proc.process("-64164233612338")); - assertEquals(2, proc.process("64164233612338")); + assertEquals(1, proc.process(dateTime(0L))); + assertEquals(3, proc.process(dateTime(-64164233612338L))); + assertEquals(2, proc.process(dateTime(64164233612338L))); } public void testValidDayNamesWithNonUTCTimeZone() { QuarterProcessor proc = new QuarterProcessor(TimeZone.getTimeZone("GMT-10:00")); - assertEquals(4, proc.process(new DateTime(0L, DateTimeZone.UTC))); - assertEquals(4, proc.process(new DateTime(-5400, 1, 1, 5, 0, DateTimeZone.UTC))); - assertEquals(1, proc.process(new DateTime(30, 4, 1, 9, 59, DateTimeZone.UTC))); + assertEquals(4, proc.process(dateTime(0L))); + assertEquals(4, proc.process(dateTime(-5400, 1, 1, 5, 0))); + assertEquals(1, proc.process(dateTime(30, 4, 1, 9, 59))); proc = new QuarterProcessor(TimeZone.getTimeZone("GMT+10:00")); - assertEquals(4, proc.process(new DateTime(10902, 9, 30, 14, 1, DateTimeZone.UTC))); - assertEquals(3, proc.process(new DateTime(10902, 9, 30, 13, 59, DateTimeZone.UTC))); + assertEquals(4, proc.process(dateTime(10902, 9, 30, 14, 1))); + assertEquals(3, proc.process(dateTime(10902, 9, 30, 13, 59))); - assertEquals(1, proc.process("0")); - assertEquals(3, proc.process("-64164233612338")); - assertEquals(2, proc.process("64164233612338")); + assertEquals(1, proc.process(dateTime(0L))); + assertEquals(3, proc.process(dateTime(-64164233612338L))); + assertEquals(2, proc.process(dateTime(64164233612338L))); } } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java index e2c42874696ab..5c2b4e396acbe 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java @@ -28,7 +28,7 @@ import org.elasticsearch.xpack.sql.querydsl.query.TermsQuery; import org.elasticsearch.xpack.sql.type.EsField; import org.elasticsearch.xpack.sql.type.TypesTests; -import org.joda.time.DateTime; +import org.elasticsearch.xpack.sql.util.DateUtils; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -150,7 +150,7 @@ public void testDateRangeCast() { assertTrue(query instanceof RangeQuery); RangeQuery rq = (RangeQuery) query; assertEquals("date", rq.field()); - assertEquals(DateTime.parse("1969-05-13T12:34:56Z"), rq.lower()); + assertEquals(DateUtils.of("1969-05-13T12:34:56Z"), rq.lower()); } public void testLikeConstructsNotSupported() { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java index 7a04139430e33..49414367767c4 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java +++ 
b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java @@ -9,9 +9,11 @@ import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import org.elasticsearch.xpack.sql.expression.Literal; import org.elasticsearch.xpack.sql.type.DataTypeConversion.Conversion; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; +import org.elasticsearch.xpack.sql.util.DateUtils; +import java.time.ZonedDateTime; + +import static org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeTestUtils.dateTime; import static org.elasticsearch.xpack.sql.tree.Location.EMPTY; public class DataTypeConversionTests extends ESTestCase { @@ -22,7 +24,7 @@ public void testConversionToString() { conversion = DataTypeConversion.conversionFor(DataType.DATE, DataType.KEYWORD); assertNull(conversion.convert(null)); - assertEquals("1970-01-01T00:00:00.000Z", conversion.convert(new DateTime(0, DateTimeZone.UTC))); + assertEquals("1970-01-01T00:00:00.000Z", conversion.convert(dateTime(0))); } /** @@ -64,33 +66,33 @@ public void testConversionToDate() { { Conversion conversion = DataTypeConversion.conversionFor(DataType.DOUBLE, to); assertNull(conversion.convert(null)); - assertEquals(new DateTime(10L, DateTimeZone.UTC), conversion.convert(10.0)); - assertEquals(new DateTime(10L, DateTimeZone.UTC), conversion.convert(10.1)); - assertEquals(new DateTime(11L, DateTimeZone.UTC), conversion.convert(10.6)); + assertEquals(dateTime(10L), conversion.convert(10.0)); + assertEquals(dateTime(10L), conversion.convert(10.1)); + assertEquals(dateTime(11L), conversion.convert(10.6)); Exception e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert(Double.MAX_VALUE)); assertEquals("[" + Double.MAX_VALUE + "] out of [Long] range", e.getMessage()); } { Conversion conversion = DataTypeConversion.conversionFor(DataType.INTEGER, to); assertNull(conversion.convert(null)); - assertEquals(new DateTime(10L, DateTimeZone.UTC), conversion.convert(10)); - assertEquals(new DateTime(-134L, DateTimeZone.UTC), conversion.convert(-134)); + assertEquals(dateTime(10L), conversion.convert(10)); + assertEquals(dateTime(-134L), conversion.convert(-134)); } { Conversion conversion = DataTypeConversion.conversionFor(DataType.BOOLEAN, to); assertNull(conversion.convert(null)); - assertEquals(new DateTime(1, DateTimeZone.UTC), conversion.convert(true)); - assertEquals(new DateTime(0, DateTimeZone.UTC), conversion.convert(false)); + assertEquals(dateTime(1), conversion.convert(true)); + assertEquals(dateTime(0), conversion.convert(false)); } Conversion conversion = DataTypeConversion.conversionFor(DataType.KEYWORD, to); assertNull(conversion.convert(null)); - assertEquals(new DateTime(1000L, DateTimeZone.UTC), conversion.convert("1970-01-01T00:00:01Z")); - assertEquals(new DateTime(1483228800000L, DateTimeZone.UTC), conversion.convert("2017-01-01T00:00:00Z")); - assertEquals(new DateTime(18000000L, DateTimeZone.UTC), conversion.convert("1970-01-01T00:00:00-05:00")); + assertEquals(dateTime(1000L), conversion.convert("1970-01-01T00:00:01Z")); + assertEquals(dateTime(1483228800000L), conversion.convert("2017-01-01T00:00:00Z")); + assertEquals(dateTime(18000000L), conversion.convert("1970-01-01T00:00:00-05:00")); // double check back and forth conversion - DateTime dt = DateTime.now(DateTimeZone.UTC); + ZonedDateTime dt = ZonedDateTime.now(DateUtils.UTC); Conversion forward = DataTypeConversion.conversionFor(DataType.DATE, DataType.KEYWORD); Conversion back = 
DataTypeConversion.conversionFor(DataType.KEYWORD, DataType.DATE); assertEquals(dt, back.convert(forward.convert(dt))); diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.move_to_step.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.move_to_step.json index ca3f1e76fb256..e3adf2d30e681 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.move_to_step.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.move_to_step.json @@ -1,6 +1,7 @@ { "ilm.move_to_step": { - "documentation": "http://www.elastic.co/guide/en/index_lifecycle/current/index_lifecycle.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-move-to-step.html", + "description": "Triggers execution of a specific step in the lifecycle policy. Since this action is designed to only be run in emergency situations, clients should not implement this API", "methods": [ "POST" ], "url": { "path": "/_ilm/move/{index}", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/stop_job.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/stop_job.yml index 721a6d41276dd..c19325bf707de 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/stop_job.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/stop_job.yml @@ -87,7 +87,7 @@ setup: --- "Test wait_for_completion default timeout": - skip: - version: " - 6.6.0" + version: " - 6.5.99" reason: wait_for_completion option was added in 6.6 - do: @@ -108,7 +108,7 @@ setup: --- "Test wait_for_completion with custom timeout": - skip: - version: " - 6.6.0" + version: " - 6.5.99" reason: wait_for_completion option was added in 6.6 - do: diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalTool.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalTool.java index d513bd1cd77f2..9ebc3adc291a1 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalTool.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalTool.java @@ -20,7 +20,6 @@ import java.util.Arrays; import java.util.List; import java.util.Locale; -import java.util.TimeZone; public class CronEvalTool extends LoggingAwareCommand { @@ -31,7 +30,7 @@ public static void main(String[] args) throws Exception { private static final DateTimeFormatter UTC_FORMATTER = DateTimeFormat.forPattern("EEE, d MMM yyyy HH:mm:ss") .withZone(DateTimeZone.UTC).withLocale(Locale.ROOT); private static final DateTimeFormatter LOCAL_FORMATTER = DateTimeFormat.forPattern("EEE, d MMM yyyy HH:mm:ss Z") - .withZone(DateTimeZone.forTimeZone(TimeZone.getDefault())); + .withZone(DateTimeZone.forTimeZone(null)); private final OptionSpec countOption; private final OptionSpec arguments; diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexAuditUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexAuditUpgradeIT.java index 2a681afc14cb7..cc704d71d0e54 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexAuditUpgradeIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexAuditUpgradeIT.java @@ -49,7 +49,7 @@ private int expectedNumUniqueNodeNameBuckets() throws IOException { } private void assertAuditDocsExist() throws Exception { - Response response = client().performRequest(new 
Request("GET", "/.security_audit_log*/doc/_count")); + Response response = client().performRequest(new Request("GET", "/.security_audit_log*/_count")); assertEquals(200, response.getStatusLine().getStatusCode()); Map responseMap = entityAsMap(response); assertNotNull(responseMap.get("count")); diff --git a/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/realm/CustomAuthenticationFailureHandler.java b/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/realm/CustomAuthenticationFailureHandler.java index 73f71c8fe584a..66ccdd0e3529e 100644 --- a/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/realm/CustomAuthenticationFailureHandler.java +++ b/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/realm/CustomAuthenticationFailureHandler.java @@ -12,8 +12,14 @@ import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; import org.elasticsearch.xpack.core.security.authc.DefaultAuthenticationFailureHandler; +import java.util.Collections; + public class CustomAuthenticationFailureHandler extends DefaultAuthenticationFailureHandler { + public CustomAuthenticationFailureHandler() { + super(Collections.emptyMap()); + } + @Override public ElasticsearchSecurityException failedAuthentication(RestRequest request, AuthenticationToken token, ThreadContext context) {