diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle index 70a8438dacf56..2693a05fb72fd 100644 --- a/benchmarks/build.gradle +++ b/benchmarks/build.gradle @@ -64,3 +64,12 @@ thirdPartyAudit.ignoreViolations( 'org.openjdk.jmh.profile.HotspotRuntimeProfiler', 'org.openjdk.jmh.util.Utils' ) + +spotless { + java { + // IDEs can sometimes run annotation processors that leave files in + // here, causing Spotless to complain. Even though this path ought not + // to exist, exclude it anyway in order to avoid spurious failures. + targetExclude 'src/main/generated/**/*.java' + } +} diff --git a/build.gradle b/build.gradle index 39d227461a5c6..3304cbb177e3b 100644 --- a/build.gradle +++ b/build.gradle @@ -219,8 +219,8 @@ task verifyVersions { * after the backport of the backcompat code is complete. */ -boolean bwc_tests_enabled = true -final String bwc_tests_disabled_issue = "" /* place a PR link here when committing bwc changes */ +boolean bwc_tests_enabled = false +final String bwc_tests_disabled_issue = "https://github.com/elastic/elasticsearch/pull/52998" /* place a PR link here when committing bwc changes */ if (bwc_tests_enabled == false) { if (bwc_tests_disabled_issue.isEmpty()) { throw new GradleException("bwc_tests_disabled_issue must be set when bwc_tests_enabled == false") diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 81777053b556a..9d654e0e6682b 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ elasticsearch = 8.0.0 -lucene = 8.5.0-snapshot-b01d7cb +lucene = 8.5.0-snapshot-c4475920b08 bundled_jdk_vendor = adoptopenjdk bundled_jdk = 13.0.2+8 diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/eql/EqlSearchRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/eql/EqlSearchRequest.java index a8b342f0a43f1..9bca610194bb6 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/eql/EqlSearchRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/eql/EqlSearchRequest.java @@ -36,32 +36,32 @@ public class EqlSearchRequest implements Validatable, ToXContentObject { private String[] indices; private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, false); - private QueryBuilder query = null; + private QueryBuilder filter = null; private String timestampField = "@timestamp"; private String eventTypeField = "event_type"; private String implicitJoinKeyField = "agent.id"; private int fetchSize = 50; private SearchAfterBuilder searchAfterBuilder; - private String rule; + private String query; - static final String KEY_QUERY = "query"; + static final String KEY_FILTER = "filter"; static final String KEY_TIMESTAMP_FIELD = "timestamp_field"; static final String KEY_EVENT_TYPE_FIELD = "event_type_field"; static final String KEY_IMPLICIT_JOIN_KEY_FIELD = "implicit_join_key_field"; static final String KEY_SIZE = "size"; static final String KEY_SEARCH_AFTER = "search_after"; - static final String KEY_RULE = "rule"; + static final String KEY_QUERY = "query"; - public EqlSearchRequest(String indices, String rule) { + public EqlSearchRequest(String indices, String query) { indices(indices); - rule(rule); + query(query); } @Override public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { builder.startObject(); - if (query != null) { - builder.field(KEY_QUERY, query); + if (filter != null) { + builder.field(KEY_FILTER, filter); } builder.field(KEY_TIMESTAMP_FIELD, 
timestampField()); builder.field(KEY_EVENT_TYPE_FIELD, eventTypeField()); @@ -74,7 +74,7 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par builder.array(KEY_SEARCH_AFTER, searchAfterBuilder.getSortValues()); } - builder.field(KEY_RULE, rule); + builder.field(KEY_QUERY, query); builder.endObject(); return builder; } @@ -88,12 +88,12 @@ public EqlSearchRequest indices(String... indices) { return this; } - public QueryBuilder query() { - return this.query; + public QueryBuilder filter() { + return this.filter; } - public EqlSearchRequest query(QueryBuilder query) { - this.query = query; + public EqlSearchRequest filter(QueryBuilder filter) { + this.filter = filter; return this; } @@ -156,13 +156,13 @@ private EqlSearchRequest setSearchAfter(SearchAfterBuilder builder) { return this; } - public String rule() { - return this.rule; + public String query() { + return this.query; } - public EqlSearchRequest rule(String rule) { - Objects.requireNonNull(rule, "rule must not be null"); - this.rule = rule; + public EqlSearchRequest query(String query) { + Objects.requireNonNull(query, "query must not be null"); + this.query = query; return this; } @@ -175,16 +175,15 @@ public boolean equals(Object o) { return false; } EqlSearchRequest that = (EqlSearchRequest) o; - return - fetchSize == that.fetchSize && + return fetchSize == that.fetchSize && Arrays.equals(indices, that.indices) && Objects.equals(indicesOptions, that.indicesOptions) && - Objects.equals(query, that.query) && + Objects.equals(filter, that.filter) && Objects.equals(timestampField, that.timestampField) && Objects.equals(eventTypeField, that.eventTypeField) && Objects.equals(implicitJoinKeyField, that.implicitJoinKeyField) && Objects.equals(searchAfterBuilder, that.searchAfterBuilder) && - Objects.equals(rule, that.rule); + Objects.equals(query, that.query); } @Override @@ -192,13 +191,13 @@ public int hashCode() { return Objects.hash( Arrays.hashCode(indices), indicesOptions, - query, + filter, fetchSize, timestampField, eventTypeField, implicitJoinKeyField, searchAfterBuilder, - rule); + query); } public String[] indices() { diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsStats.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsStats.java index bfef47727f631..53e3adf2b8433 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsStats.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsStats.java @@ -44,6 +44,7 @@ public static DataFrameAnalyticsStats fromXContent(XContentParser parser) throws static final ParseField STATE = new ParseField("state"); static final ParseField FAILURE_REASON = new ParseField("failure_reason"); static final ParseField PROGRESS = new ParseField("progress"); + static final ParseField MEMORY_USAGE = new ParseField("memory_usage"); static final ParseField NODE = new ParseField("node"); static final ParseField ASSIGNMENT_EXPLANATION = new ParseField("assignment_explanation"); @@ -55,8 +56,9 @@ public static DataFrameAnalyticsStats fromXContent(XContentParser parser) throws (DataFrameAnalyticsState) args[1], (String) args[2], (List) args[3], - (NodeAttributes) args[4], - (String) args[5])); + (MemoryUsage) args[4], + (NodeAttributes) args[5], + (String) args[6])); static { PARSER.declareString(constructorArg(), ID); @@ -68,6 +70,7 @@ public static DataFrameAnalyticsStats 
fromXContent(XContentParser parser) throws }, STATE, ObjectParser.ValueType.STRING); PARSER.declareString(optionalConstructorArg(), FAILURE_REASON); PARSER.declareObjectArray(optionalConstructorArg(), PhaseProgress.PARSER, PROGRESS); + PARSER.declareObject(optionalConstructorArg(), MemoryUsage.PARSER, MEMORY_USAGE); PARSER.declareObject(optionalConstructorArg(), NodeAttributes.PARSER, NODE); PARSER.declareString(optionalConstructorArg(), ASSIGNMENT_EXPLANATION); } @@ -76,16 +79,18 @@ public static DataFrameAnalyticsStats fromXContent(XContentParser parser) throws private final DataFrameAnalyticsState state; private final String failureReason; private final List progress; + private final MemoryUsage memoryUsage; private final NodeAttributes node; private final String assignmentExplanation; public DataFrameAnalyticsStats(String id, DataFrameAnalyticsState state, @Nullable String failureReason, - @Nullable List progress, @Nullable NodeAttributes node, - @Nullable String assignmentExplanation) { + @Nullable List progress, @Nullable MemoryUsage memoryUsage, + @Nullable NodeAttributes node, @Nullable String assignmentExplanation) { this.id = id; this.state = state; this.failureReason = failureReason; this.progress = progress; + this.memoryUsage = memoryUsage; this.node = node; this.assignmentExplanation = assignmentExplanation; } @@ -106,6 +111,11 @@ public List getProgress() { return progress; } + @Nullable + public MemoryUsage getMemoryUsage() { + return memoryUsage; + } + public NodeAttributes getNode() { return node; } @@ -124,13 +134,14 @@ public boolean equals(Object o) { && Objects.equals(state, other.state) && Objects.equals(failureReason, other.failureReason) && Objects.equals(progress, other.progress) + && Objects.equals(memoryUsage, other.memoryUsage) && Objects.equals(node, other.node) && Objects.equals(assignmentExplanation, other.assignmentExplanation); } @Override public int hashCode() { - return Objects.hash(id, state, failureReason, progress, node, assignmentExplanation); + return Objects.hash(id, state, failureReason, progress, memoryUsage, node, assignmentExplanation); } @Override @@ -140,6 +151,7 @@ public String toString() { .add("state", state) .add("failureReason", failureReason) .add("progress", progress) + .add("memoryUsage", memoryUsage) .add("node", node) .add("assignmentExplanation", assignmentExplanation) .toString(); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/MemoryUsage.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/MemoryUsage.java new file mode 100644 index 0000000000000..323ebb52a7aed --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/MemoryUsage.java @@ -0,0 +1,88 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml.dataframe; + +import org.elasticsearch.client.common.TimeUtil; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.inject.internal.ToStringBuilder; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.time.Instant; +import java.util.Objects; + +public class MemoryUsage implements ToXContentObject { + + static final ParseField TIMESTAMP = new ParseField("timestamp"); + static final ParseField PEAK_USAGE_BYTES = new ParseField("peak_usage_bytes"); + + public static final ConstructingObjectParser<MemoryUsage, Void> PARSER = new ConstructingObjectParser<>("analytics_memory_usage", + true, a -> new MemoryUsage((Instant) a[0], (long) a[1])); + + static { + PARSER.declareField(ConstructingObjectParser.constructorArg(), + p -> TimeUtil.parseTimeFieldToInstant(p, TIMESTAMP.getPreferredName()), + TIMESTAMP, + ObjectParser.ValueType.VALUE); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), PEAK_USAGE_BYTES); + } + + private final Instant timestamp; + private final long peakUsageBytes; + + public MemoryUsage(Instant timestamp, long peakUsageBytes) { + this.timestamp = Instant.ofEpochMilli(Objects.requireNonNull(timestamp).toEpochMilli()); + this.peakUsageBytes = peakUsageBytes; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.timeField(TIMESTAMP.getPreferredName(), TIMESTAMP.getPreferredName() + "_string", timestamp.toEpochMilli()); + builder.field(PEAK_USAGE_BYTES.getPreferredName(), peakUsageBytes); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (o == this) return true; + if (o == null || getClass() != o.getClass()) return false; + + MemoryUsage other = (MemoryUsage) o; + return Objects.equals(timestamp, other.timestamp) + && peakUsageBytes == other.peakUsageBytes; + } + + @Override + public int hashCode() { + return Objects.hash(timestamp, peakUsageBytes); + } + + @Override + public String toString() { + return new ToStringBuilder(getClass()) + .add(TIMESTAMP.getPreferredName(), timestamp.getEpochSecond()) + .add(PEAK_USAGE_BYTES.getPreferredName(), peakUsageBytes) + .toString(); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java index 6fe08f8a507de..3e9cc1f55ff38 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java @@ -1506,6 +1506,7 @@ public void testGetDataFrameAnalyticsStats() throws Exception { assertThat(progress.get(1), equalTo(new PhaseProgress("loading_data", 0))); assertThat(progress.get(2), equalTo(new PhaseProgress("analyzing", 0))); assertThat(progress.get(3), equalTo(new PhaseProgress("writing_results", 0))); + assertThat(stats.getMemoryUsage(), is(nullValue())); } public void testStartDataFrameAnalyticsConfig() throws Exception { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/eql/EqlSearchRequestTests.java
b/client/rest-high-level/src/test/java/org/elasticsearch/client/eql/EqlSearchRequestTests.java index cfbe9d8f76f9b..8791b3356e9bb 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/eql/EqlSearchRequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/eql/EqlSearchRequestTests.java @@ -46,7 +46,7 @@ protected EqlSearchRequest createClientTestInstance() { EqlSearchRequest.eventTypeField(randomAlphaOfLength(10)); } if (randomBoolean()) { - EqlSearchRequest.rule(randomAlphaOfLength(10)); + EqlSearchRequest.query(randomAlphaOfLength(10)); } if (randomBoolean()) { EqlSearchRequest.timestampField(randomAlphaOfLength(10)); @@ -56,9 +56,9 @@ protected EqlSearchRequest createClientTestInstance() { } if (randomBoolean()) { if (randomBoolean()) { - EqlSearchRequest.query(QueryBuilders.matchAllQuery()); + EqlSearchRequest.filter(QueryBuilders.matchAllQuery()); } else { - EqlSearchRequest.query(QueryBuilders.termQuery(randomAlphaOfLength(10), randomInt(100))); + EqlSearchRequest.filter(QueryBuilders.termQuery(randomAlphaOfLength(10), randomInt(100))); } } return EqlSearchRequest; @@ -75,8 +75,8 @@ protected void assertInstances(org.elasticsearch.xpack.eql.action.EqlSearchReque assertThat(serverInstance.eventTypeField(), equalTo(clientTestInstance.eventTypeField())); assertThat(serverInstance.implicitJoinKeyField(), equalTo(clientTestInstance.implicitJoinKeyField())); assertThat(serverInstance.timestampField(), equalTo(clientTestInstance.timestampField())); + assertThat(serverInstance.filter(), equalTo(clientTestInstance.filter())); assertThat(serverInstance.query(), equalTo(clientTestInstance.query())); - assertThat(serverInstance.rule(), equalTo(clientTestInstance.rule())); assertThat(serverInstance.searchAfter(), equalTo(clientTestInstance.searchAfter())); assertThat(serverInstance.indicesOptions(), equalTo(clientTestInstance.indicesOptions())); assertThat(serverInstance.indices(), equalTo(clientTestInstance.indices())); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsStatsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsStatsTests.java index f8eddd36bc6d9..48ebf71e36023 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsStatsTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsStatsTests.java @@ -47,6 +47,7 @@ public static DataFrameAnalyticsStats randomDataFrameAnalyticsStats() { randomFrom(DataFrameAnalyticsState.values()), randomBoolean() ? null : randomAlphaOfLength(10), randomBoolean() ? null : createRandomProgress(), + randomBoolean() ? null : MemoryUsageTests.createRandom(), randomBoolean() ? null : NodeAttributesTests.createRandom(), randomBoolean() ? 
null : randomAlphaOfLengthBetween(1, 20)); } @@ -70,6 +71,9 @@ public static void toXContent(DataFrameAnalyticsStats stats, XContentBuilder bui if (stats.getProgress() != null) { builder.field(DataFrameAnalyticsStats.PROGRESS.getPreferredName(), stats.getProgress()); } + if (stats.getMemoryUsage() != null) { + builder.field(DataFrameAnalyticsStats.MEMORY_USAGE.getPreferredName(), stats.getMemoryUsage()); + } if (stats.getNode() != null) { builder.field(DataFrameAnalyticsStats.NODE.getPreferredName(), stats.getNode()); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/MemoryUsageTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/MemoryUsageTests.java new file mode 100644 index 0000000000000..8e06db6f2b37f --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/MemoryUsageTests.java @@ -0,0 +1,47 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml.dataframe; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.time.Instant; + +public class MemoryUsageTests extends AbstractXContentTestCase { + + @Override + protected MemoryUsage createTestInstance() { + return createRandom(); + } + + public static MemoryUsage createRandom() { + return new MemoryUsage(Instant.now(), randomNonNegativeLong()); + } + + @Override + protected MemoryUsage doParseInstance(XContentParser parser) throws IOException { + return MemoryUsage.PARSER.apply(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } +} diff --git a/docs/README.asciidoc b/docs/README.asciidoc index 0cdef202f9900..f6a8ed48e63d1 100644 --- a/docs/README.asciidoc +++ b/docs/README.asciidoc @@ -23,7 +23,7 @@ See: https://github.com/elastic/docs Snippets marked with `[source,console]` are automatically annotated with "VIEW IN CONSOLE" and "COPY AS CURL" in the documentation and are automatically tested by the command `./gradlew -pdocs check`. To test just the docs from a -single page, use e.g. `./gradlew -ddocs integTestRunner --tests "*rollover*"`. +single page, use e.g. `./gradlew -pdocs integTestRunner --tests "\*rollover*"`. By default each `[source,console]` snippet runs as its own isolated test. 
You can manipulate the test execution in the following ways: diff --git a/docs/reference/analysis/index-search-time.asciidoc b/docs/reference/analysis/index-search-time.asciidoc index 894cb36f871d0..652fb5eb989dd 100644 --- a/docs/reference/analysis/index-search-time.asciidoc +++ b/docs/reference/analysis/index-search-time.asciidoc @@ -67,7 +67,7 @@ analyzer produces the following tokens: [ quick, fox ] ------ -To execute the serach, {es} compares these query string tokens to the tokens +To execute the search, {es} compares these query string tokens to the tokens indexed in the `text` field. [options="header"] diff --git a/docs/reference/analysis/tokenfilters/trim-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/trim-tokenfilter.asciidoc index 1373811b0cb82..19d47f203afb8 100644 --- a/docs/reference/analysis/tokenfilters/trim-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/trim-tokenfilter.asciidoc @@ -4,4 +4,107 @@ Trim ++++ -The `trim` token filter trims the whitespace surrounding a token. +Removes leading and trailing whitespace from each token in a stream. + +The `trim` filter uses Lucene's +https://lucene.apache.org/core/{lucene_version_path}/analyzers-common/org/apache/lucene/analysis/miscellaneous/TrimFilter.html[TrimFilter]. + +[TIP] +==== +Many commonly used tokenizers, such as the +<> or +<> tokenizer, remove whitespace by +default. When using these tokenizers, you don't need to add a separate `trim` +filter. +==== + +[[analysis-trim-tokenfilter-analyze-ex]] +==== Example + +To see how the `trim` filter works, you first need to produce a token +containing whitespace. + +The following <> request uses the +<> tokenizer to produce a token for +`" fox "`. + +[source,console] +---- +GET _analyze +{ + "tokenizer" : "keyword", + "text" : " fox " +} +---- + +The API returns the following response. Note the `" fox "` token contains +the original text's whitespace. + +[source,console-result] +---- +{ + "tokens": [ + { + "token": " fox ", + "start_offset": 0, + "end_offset": 5, + "type": "word", + "position": 0 + } + ] +} +---- + +To remove the whitespace, add the `trim` filter to the previous analyze API +request. + +[source,console] +---- +GET _analyze +{ + "tokenizer" : "keyword", + "filter" : ["trim"], + "text" : " fox " +} +---- + +The API returns the following response. The returned `fox` token does not +include any leading or trailing whitespace. + +[source,console-result] +---- +{ + "tokens": [ + { + "token": "fox", + "start_offset": 0, + "end_offset": 5, + "type": "word", + "position": 0 + } + ] +} +---- + +[[analysis-trim-tokenfilter-analyzer-ex]] +==== Add to an analyzer + +The following <> request uses the `trim` +filter to configure a new <>. 
+ +[source,console] +---- +PUT trim_example +{ + "settings": { + "analysis": { + "analyzer": { + "keyword_trim": { + "tokenizer": "keyword", + "filter": [ "trim" ] + } + } + } + } +} +---- \ No newline at end of file diff --git a/docs/reference/cat.asciidoc b/docs/reference/cat.asciidoc index 4c30d21693d99..d557a8c930a20 100644 --- a/docs/reference/cat.asciidoc +++ b/docs/reference/cat.asciidoc @@ -227,6 +227,8 @@ include::cat/alias.asciidoc[] include::cat/allocation.asciidoc[] +include::cat/anomaly-detectors.asciidoc[] + include::cat/count.asciidoc[] include::cat/dataframeanalytics.asciidoc[] diff --git a/docs/reference/cat/anomaly-detectors.asciidoc b/docs/reference/cat/anomaly-detectors.asciidoc new file mode 100644 index 0000000000000..cc88b2c2b3b3d --- /dev/null +++ b/docs/reference/cat/anomaly-detectors.asciidoc @@ -0,0 +1,280 @@ +[role="xpack"] +[testenv="platinum"] +[[cat-anomaly-detectors]] +=== cat anomaly detectors API +++++ +cat anomaly detectors +++++ + +Returns configuration and usage information about {anomaly-jobs}. + +[[cat-anomaly-detectors-request]] +==== {api-request-title} + +`GET /_cat/ml/anomaly_detectors/` + + +`GET /_cat/ml/anomaly_detectors` + +[[cat-anomaly-detectors-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `monitor_ml`, +`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. See +<> and {ml-docs}/setup.html[Set up {ml-features}]. + + +[[cat-anomaly-detectors-desc]] +==== {api-description-title} + +See {ml-docs}/ml-jobs.html[{anomaly-jobs-cap}]. + +NOTE: This API returns a maximum of 10,000 jobs. + +[[cat-anomaly-detectors-path-params]] +==== {api-path-parms-title} + +``:: +(Optional, string) +include::{docdir}/ml/ml-shared.asciidoc[tag=job-id-anomaly-detection] + +[[cat-anomaly-detectors-query-params]] +==== {api-query-parms-title} + +`allow_no_jobs`:: +(Optional, boolean) +include::{docdir}/ml/ml-shared.asciidoc[tag=allow-no-jobs] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=bytes] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=http-format] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-h] ++ +If you do not specify which columns to include, the API returns the default +columns. If you explicitly specify one or more columns, it returns only the +specified columns. 
++ +Valid columns are: + +`assignment_explanation`, `ae`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=assignment-explanation-anomaly-jobs] + +`buckets.count`, `bc`, `bucketsCount`::: +(Default) +include::{docdir}/ml/ml-shared.asciidoc[tag=bucket-count-anomaly-jobs] + +`buckets.time.exp_avg`, `btea`, `bucketsTimeExpAvg`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=bucket-time-exponential-average] + +`buckets.time.exp_avg_hour`, `bteah`, `bucketsTimeExpAvgHour`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=bucket-time-exponential-average-hour] + +`buckets.time.max`, `btmax`, `bucketsTimeMax`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=bucket-time-maximum] + +`buckets.time.min`, `btmin`, `bucketsTimeMin`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=bucket-time-minimum] + +`buckets.time.total`, `btt`, `bucketsTimeTotal`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=bucket-time-total] + +`data.buckets`, `db`, `dataBuckets`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=bucket-count] + +`data.earliest_record`, `der`, `dataEarliestRecord`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=earliest-record-timestamp] + +`data.empty_buckets`, `deb`, `dataEmptyBuckets`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=empty-bucket-count] + +`data.input_bytes`, `dib`, `dataInputBytes`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=input-bytes] + +`data.input_fields`, `dif`, `dataInputFields`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=input-field-count] + +`data.input_records`, `dir`, `dataInputRecords`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=input-record-count] + +`data.invalid_dates`, `did`, `dataInvalidDates`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=invalid-date-count] + +`data.last`, `dl`, `dataLast`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=last-data-time] + +`data.last_empty_bucket`, `dleb`, `dataLastEmptyBucket`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=latest-empty-bucket-timestamp] + +`data.last_sparse_bucket`, `dlsb`, `dataLastSparseBucket`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=latest-sparse-record-timestamp] + +`data.latest_record`, `dlr`, `dataLatestRecord`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=latest-record-timestamp] + +`data.missing_fields`, `dmf`, `dataMissingFields`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=missing-field-count] + +`data.out_of_order_timestamps`, `doot`, `dataOutOfOrderTimestamps`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=out-of-order-timestamp-count] + +`data.processed_fields`, `dpf`, `dataProcessedFields`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=processed-field-count] + +`data.processed_records`, `dpr`, `dataProcessedRecords`::: +(Default) +include::{docdir}/ml/ml-shared.asciidoc[tag=processed-record-count] + +`data.sparse_buckets`, `dsb`, `dataSparseBuckets`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=sparse-bucket-count] + +`forecasts.memory.avg`, `fmavg`, `forecastsMemoryAvg`::: +The average memory usage in bytes for forecasts related to the {anomaly-job}. + +`forecasts.memory.max`, `fmmax`, `forecastsMemoryMax`::: +The maximum memory usage in bytes for forecasts related to the {anomaly-job}. + +`forecasts.memory.min`, `fmmin`, `forecastsMemoryMin`::: +The minimum memory usage in bytes for forecasts related to the {anomaly-job}. + +`forecasts.memory.total`, `fmt`, `forecastsMemoryTotal`::: +The total memory usage in bytes for forecasts related to the {anomaly-job}. 
+ +`forecasts.records.avg`, `fravg`, `forecastsRecordsAvg`::: +The average number of `model_forecast` documents written for forecasts related +to the {anomaly-job}. + +`forecasts.records.max`, `frmax`, `forecastsRecordsMax`::: +The maximum number of `model_forecast` documents written for forecasts related +to the {anomaly-job}. + +`forecasts.records.min`, `frmin`, `forecastsRecordsMin`::: +The minimum number of `model_forecast` documents written for forecasts related +to the {anomaly-job}. + +`forecasts.records.total`, `frt`, `forecastsRecordsTotal`::: +The total number of `model_forecast` documents written for forecasts related to +the {anomaly-job}. + +`forecasts.time.avg`, `ftavg`, `forecastsTimeAvg`::: +The average runtime in milliseconds for forecasts related to the {anomaly-job}. + +`forecasts.time.max`, `ftmax`, `forecastsTimeMax`::: +The maximum runtime in milliseconds for forecasts related to the {anomaly-job}. + +`forecasts.time.min`, `ftmin`, `forecastsTimeMin`::: +The minimum runtime in milliseconds for forecasts related to the {anomaly-job}. + +`forecasts.time.total`, `ftt`, `forecastsTimeTotal`::: +The total runtime in milliseconds for forecasts related to the {anomaly-job}. + +`forecasts.total`, `ft`, `forecastsTotal`::: +(Default) +include::{docdir}/ml/ml-shared.asciidoc[tag=forecast-total] + +`id`::: +(Default) +include::{docdir}/ml/ml-shared.asciidoc[tag=job-id-anomaly-detection] + +`model.bucket_allocation_failures`, `mbaf`, `modelBucketAllocationFailures`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=bucket-allocation-failures-count] + +`model.by_fields`, `mbf`, `modelByFields`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=total-by-field-count] + +`model.bytes`, `mb`, `modelBytes`::: +(Default) +include::{docdir}/ml/ml-shared.asciidoc[tag=model-bytes] + +`model.bytes_exceeded`, `mbe`, `modelBytesExceeded`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=model-bytes-exceeded] + +`model.categorization_status`, `mcs`, `modelCategorizationStatus`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=categorization-status] + +`model.categorized_doc_count`, `mcdc`, `modelCategorizedDocCount`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=categorized-doc-count] + +`model.dead_category_count`, `mdcc`, `modelDeadCategoryCount`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=dead-category-count] + +`model.frequent_category_count`, `mfcc`, `modelFrequentCategoryCount`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=frequent-category-count] + +`model.log_time`, `mlt`, `modelLogTime`::: +The timestamp when the model stats were gathered, according to server time. 
+ +`model.memory_limit`, `mml`, `modelMemoryLimit`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=model-memory-limit-anomaly-jobs] + +`model.memory_status`, `mms`, `modelMemoryStatus`::: +(Default) +include::{docdir}/ml/ml-shared.asciidoc[tag=model-memory-status] + +`model.over_fields`, `mof`, `modelOverFields`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=total-over-field-count] + +`model.partition_fields`, `mpf`, `modelPartitionFields`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=total-partition-field-count] + +`model.rare_category_count`, `mrcc`, `modelRareCategoryCount`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=rare-category-count] + +`model.timestamp`, `mt`, `modelTimestamp`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=model-timestamp] + +`model.total_category_count`, `mtcc`, `modelTotalCategoryCount`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=total-category-count] + +`node.address`, `na`, `nodeAddress`::: +The network address of the node. ++ +include::{docdir}/ml/ml-shared.asciidoc[tag=node-jobs] + +`node.ephemeral_id`, `ne`, `nodeEphemeralId`::: +The ephemeral ID of the node. ++ +include::{docdir}/ml/ml-shared.asciidoc[tag=node-jobs] + +`node.id`, `ni`, `nodeId`::: +The unique identifier of the node. ++ +include::{docdir}/ml/ml-shared.asciidoc[tag=node-jobs] + +`node.name`, `nn`, `nodeName`::: +The node name. ++ +include::{docdir}/ml/ml-shared.asciidoc[tag=node-jobs] + +`opened_time`, `ot`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=open-time] + +`state`, `s`::: +(Default) +include::{docdir}/ml/ml-shared.asciidoc[tag=state-anomaly-job] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=help] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-s] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=time] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v] + +[[cat-anomaly-detectors-example]] +==== {api-examples-title} + +[source,console] +-------------------------------------------------- +GET _cat/ml/anomaly_detectors?h=id,s,dpr,mb&v +-------------------------------------------------- +// TEST[skip:kibana sample data] + +[source,console-result] +---- +id s dpr mb +high_sum_total_sales closed 14022 1.5mb +low_request_rate closed 1216 40.5kb +response_code_rates closed 28146 132.7kb +url_scanning closed 28146 501.6kb +---- +// TESTRESPONSE[skip:kibana sample data] diff --git a/docs/reference/cat/datafeeds.asciidoc b/docs/reference/cat/datafeeds.asciidoc index 95a830aa82347..5764563375732 100644 --- a/docs/reference/cat/datafeeds.asciidoc +++ b/docs/reference/cat/datafeeds.asciidoc @@ -22,12 +22,14 @@ Returns configuration and usage information about {dfeeds}. `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. See <> and {ml-docs}/setup.html[Set up {ml-features}]. -//// + [[cat-datafeeds-desc]] ==== {api-description-title} -TBD: This API returns a maximum of 10,000 {dfeeds}. -//// +{dfeeds-cap} retrieve data from {es} for analysis by {anomaly-jobs}. For more +information, see {ml-docs}/ml-dfeeds.html[{dfeeds-cap}]. + +NOTE: This API returns a maximum of 10,000 {dfeeds}.
[[cat-datafeeds-path-params]] ==== {api-path-parms-title} @@ -46,94 +48,68 @@ include::{docdir}/ml/ml-shared.asciidoc[tag=allow-no-datafeeds] include::{docdir}/rest-api/common-parms.asciidoc[tag=http-format] include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-h] - -include::{docdir}/rest-api/common-parms.asciidoc[tag=help] - -include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-s] - -include::{docdir}/rest-api/common-parms.asciidoc[tag=time] - -include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v] - -[[cat-datafeeds-results]] -==== {api-response-body-title} - -`assignment_explanation`:: -include::{docdir}/ml/ml-shared.asciidoc[tag=assignment-explanation] + -To retrieve this information, specify the `ae` column in the `h` query parameter. +If you do not specify which columns to include, the API returns the default +columns. If you explicitly specify one or more columns, it returns only the +specified columns. ++ +Valid columns are: -`bucket.count`:: +`assignment_explanation`, `ae`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=assignment-explanation-datafeeds] + +`buckets.count`, `bc`, `bucketsCount`::: +(Default) include::{docdir}/ml/ml-shared.asciidoc[tag=bucket-count] -+ -To retrieve this information, specify the `bc` or `bucketCount` column in the -`h` query parameter. -`id`:: +`id`::: +(Default) include::{docdir}/ml/ml-shared.asciidoc[tag=datafeed-id] -+ -To retrieve this information, specify the `id` column in the `h` query parameter. - -`node.address`:: + +`node.address`, `na`, `nodeAddress`::: The network address of the node. -+ -include::{docdir}/ml/ml-shared.asciidoc[tag=node] + -To retrieve this information, specify the `na` or `nodeAddress` column in the -`h` query parameter. +include::{docdir}/ml/ml-shared.asciidoc[tag=node-datafeeds] -`node.ephemeral_id`:: +`node.ephemeral_id`, `ne`, `nodeEphemeralId`::: The ephemeral ID of the node. + -include::{docdir}/ml/ml-shared.asciidoc[tag=node] -+ -To retrieve this information, specify the `ne` or `nodeEphemeralId` column in -the `h` query parameter. +include::{docdir}/ml/ml-shared.asciidoc[tag=node-datafeeds] -`node.id`:: +`node.id`, `ni`, `nodeId`::: The unique identifier of the node. + -include::{docdir}/ml/ml-shared.asciidoc[tag=node] -+ -To retrieve this information, specify the `ni` or `nodeId` column in the `h` -query parameter. +include::{docdir}/ml/ml-shared.asciidoc[tag=node-datafeeds] -`node.name`:: +`node.name`, `nn`, `nodeName`::: The node name. + -include::{docdir}/ml/ml-shared.asciidoc[tag=node] -+ -To retrieve this information, specify the `nn` or `nodeName` column in the `h` -query parameter. +include::{docdir}/ml/ml-shared.asciidoc[tag=node-datafeeds] -`search.bucket_avg`:: +`search.bucket_avg`, `sba`, `searchBucketAvg`::: include::{docdir}/ml/ml-shared.asciidoc[tag=search-bucket-avg] -+ -To retrieve this information, specify the `sba` or `searchBucketAvg` column in -the `h` query parameter. -`search.count`:: +`search.count`, `sc`, `searchCount`::: +(Default) include::{docdir}/ml/ml-shared.asciidoc[tag=search-count] -+ -To retrieve this information, specify the `sc` or `searchCount` column in the -`h` query parameter. -`search.exp_avg_hour`:: +`search.exp_avg_hour`, `seah`, `searchExpAvgHour`::: include::{docdir}/ml/ml-shared.asciidoc[tag=search-exp-avg-hour] -+ -To retrieve this information, specify the `seah` or `searchExpAvgHour` column in -the `h` query parameter. 
-`search.time`:: +`search.time`, `st`, `searchTime`::: include::{docdir}/ml/ml-shared.asciidoc[tag=search-time] -+ -To retrieve this information, specify the `st` or `searchTime` column in the `h` -query parameter. -`state`:: +`state`, `s`::: +(Default) include::{docdir}/ml/ml-shared.asciidoc[tag=state-datafeed] -+ -To retrieve this information, specify the `s` column in the `h` query parameter. + +include::{docdir}/rest-api/common-parms.asciidoc[tag=help] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-s] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=time] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v] [[cat-datafeeds-example]] ==== {api-examples-title} @@ -146,7 +122,7 @@ GET _cat/ml/datafeeds?v [source,console-result] ---- -id state bucket.count search.count +id state buckets.count search.count datafeed-high_sum_total_sales stopped 743 7 datafeed-low_request_rate stopped 1457 3 datafeed-response_code_rates stopped 1460 18 diff --git a/docs/reference/eql/search.asciidoc b/docs/reference/eql/search.asciidoc index acc061d5457d5..58299252dfa14 100644 --- a/docs/reference/eql/search.asciidoc +++ b/docs/reference/eql/search.asciidoc @@ -16,18 +16,19 @@ The following <> request adds some example log data to the [source,console] ---- PUT sec_logs/_bulk?refresh -{"index":{"_index" : "sec_logs"}} +{"index":{"_index" : "sec_logs", "_id" : "1"}} { "@timestamp": "2020-12-07T11:06:07.000Z", "agent": { "id": "8a4f500d" }, "event": { "category": "process" }, "process": { "name": "cmd.exe", "path": "C:\\Windows\\System32\\cmd.exe" } } -{"index":{"_index" : "sec_logs"}} -{ "@timestamp": "2020-12-07T11:07:08.000Z", "agent": { "id": "8a4f500d" }, "event": { "category": "image_load" }, "file": { "name": "cmd.exe", "path": "C:\\Windows\\System32\\cmd.exe" }, "process": { "name": "cmd.exe", "path": "C:\\Windows\\System32\\cmd.exe" } } -{"index":{"_index" : "sec_logs"}} +{"index":{"_index" : "sec_logs", "_id" : "2"}} +{ "@timestamp": "2020-12-07T11:07:08.000Z", "agent": { "id": "8a4f500d" }, "event": { "category": "file" }, "file": { "accessed": "2020-12-07T11:07:08.000Z", "name": "cmd.exe", "path": "C:\\Windows\\System32\\cmd.exe", "type": "file", "size": 16384 }, "process": { "name": "cmd.exe", "path": "C:\\Windows\\System32\\cmd.exe" } } +{"index":{"_index" : "sec_logs", "_id" : "3"}} { "@timestamp": "2020-12-07T11:07:09.000Z", "agent": { "id": "8a4f500d" }, "event": { "category": "process" }, "process": { "name": "regsvr32.exe", "path": "C:\\Windows\\System32\\regsvr32.exe" } } ---- +// TESTSETUP You can now use the EQL search API to search this index using an EQL query. The following request searches the `sec_logs` index using the EQL query -specified in the `rule` parameter. The EQL query matches events with an +specified in the `query` parameter. The EQL query matches events with an `event.category` of `process` that have a `process.name` of `cmd.exe`. [source,console] @@ -35,13 +36,125 @@ specified in the `rule` parameter. The EQL query matches events with an GET sec_logs/_eql/search { "event_type_field": "event.category", - "rule": """ + "query": """ process where process.name == "cmd.exe" """ } ---- -// TEST[continued] Because the `sec_logs` index follows the ECS, you don't need to specify the -event type or timestamp fields. The request uses the `event.category` and -`@timestamp` fields by default. +timestamp fields. The request uses the `@timestamp` field by default.
+ +The API returns the following response containing the matching event: + +[source,console-result] +---- +{ + "took": 3, + "timed_out": false, + "hits": { + "total": { + "value": 1, + "relation": "eq" + }, + "events": [ + { + "_index": "sec_logs", + "_id": "1", + "_score": 0.9400072, + "_source": { + "@timestamp": "2020-12-07T11:06:07.000Z", + "agent": { + "id": "8a4f500d" + }, + "event": { + "category": "process" + }, + "process": { + "name": "cmd.exe", + "path": "C:\\Windows\\System32\\cmd.exe" + } + } + } + ] + } +} +---- +// TESTRESPONSE[s/"took": 3/"took": $body.took/] + +[discrete] +[[eql-search-specify-event-type-field]] +=== Specify an event type field + +The EQL search API uses `event_type` as the required <> by default. You can use the `event_type_field` parameter to specify +another event type field. + +For example, the following request specifies `file.type` as the event type +field. + +[source,console] +---- +GET sec_logs/_eql/search +{ + "event_type_field": "file.type", + "query": """ + file where agent.id == "8a4f500d" + """ +} +---- + +[discrete] +[[eql-search-specify-timestamp-field]] +=== Specify a timestamp field + +The EQL search API uses `@timestamp` as the required <> by default. You can use the `timestamp_field` parameter to +specify another timestamp field. + +For example, the following request specifies `file.accessed` as the event +timestamp field. + +[source,console] +---- +GET sec_logs/_eql/search +{ + "timestamp_field": "file.accessed", + "event_type_field": "event.category", + "query": """ + file where (file.size > 1 and file.type == "file") + """ +} +---- + +[discrete] +[[eql-search-filter-query-dsl]] +=== Filter using query DSL + +You can use the `filter` parameter to specify an additional query using +<>. This query filters the documents on which the EQL query +runs. + +For example, the following request uses a `range` query to filter the `sec_logs` +index down to only documents with a `file.size` value between `1` and `1000000` +bytes, inclusive. The EQL query in the `query` parameter then runs on these +filtered documents. + +[source,console] +---- +GET sec_logs/_eql/search +{ + "event_type_field": "event.category", + "filter": { + "range" : { + "file.size" : { + "gte" : 1, + "lte" : 1000000 + } + } + }, + "query": """ + file where (file.type == "file" and file.name == "cmd.exe") + """ +} +---- diff --git a/docs/reference/how-to/search-speed.asciidoc b/docs/reference/how-to/search-speed.asciidoc index 2ea7bee83da97..4750b9c07fd61 100644 --- a/docs/reference/how-to/search-speed.asciidoc +++ b/docs/reference/how-to/search-speed.asciidoc @@ -418,3 +418,115 @@ The <> field has an <> option that indexes prefixes of all terms and is automatically leveraged by query parsers to run prefix queries. If your use-case involves running lots of prefix queries, this can speed up queries significantly. + +[[faster-filtering-with-constant-keyword]] +=== Use <> to speed up filtering + +There is a general rule that the cost of a filter is mostly a function of the +number of matched documents. Imagine that you have an index containing cycles. +There are a large number of bicycles and many searches perform a filter on +`cycle_type: bicycle`. This very common filter is unfortunately also very costly +since it matches most documents. There is a simple way to avoid running this +filter: move bicycles to their own index and filter bicycles by searching this +index instead of adding a filter to the query.
+ +Unfortunately this can make client-side logic tricky, which is where +`constant_keyword` helps. By mapping `cycle_type` as a `constant_keyword` with +value `bicycle` on the index that contains bicycles, clients can keep running +the exact same queries as they used to run on the monolithic index and +Elasticsearch will do the right thing on the bicycles index by ignoring filters +on `cycle_type` if the value is `bicycle` and returning no hits otherwise. + +Here is what mappings could look like: + +[source,console] +-------------------------------------------------- +PUT bicycles +{ + "mappings": { + "properties": { + "cycle_type": { + "type": "constant_keyword", + "value": "bicycle" + }, + "name": { + "type": "text" + } + } + } +} + +PUT other_cycles +{ + "mappings": { + "properties": { + "cycle_type": { + "type": "keyword" + }, + "name": { + "type": "text" + } + } + } +} +-------------------------------------------------- + +We are splitting our index in two: one that will contain only bicycles, and +another one that contains other cycles: unicycles, tricycles, etc. Then at +search time, we need to search both indices, but we don't need to modify +queries. + + +[source,console] +-------------------------------------------------- +GET bicycles,other_cycles/_search +{ + "query": { + "bool": { + "must": { + "match": { + "description": "dutch" + } + }, + "filter": { + "term": { + "cycle_type": "bicycle" + } + } + } + } +} +-------------------------------------------------- +// TEST[continued] + +On the `bicycles` index, Elasticsearch will simply ignore the `cycle_type` +filter and rewrite the search request to the one below: + +[source,console] +-------------------------------------------------- +GET bicycles,other_cycles/_search +{ + "query": { + "match": { + "description": "dutch" + } + } +} +-------------------------------------------------- +// TEST[continued] + +On the `other_cycles` index, Elasticsearch will quickly figure out that +`bicycle` doesn't exist in the terms dictionary of the `cycle_type` field and +return a search response with no hits. + +This is a powerful way of making queries cheaper by putting common values in a +dedicated index. This idea can also be combined across multiple fields: for +instance if you track the color of each cycle and your `bicycles` index ends up +having a majority of black bikes, you could split it into `bicycles-black` +and `bicycles-other-colors` indices. + +The `constant_keyword` field is not strictly required for this optimization: it +is also possible to update the client-side logic in order to route queries to +the relevant indices based on filters. However, `constant_keyword` makes this +transparent and decouples search requests from the index topology at the cost +of very little overhead. diff --git a/docs/reference/mapping/params/index-options.asciidoc b/docs/reference/mapping/params/index-options.asciidoc index da66d2489cd30..8dc48b46e5b14 100644 --- a/docs/reference/mapping/params/index-options.asciidoc +++ b/docs/reference/mapping/params/index-options.asciidoc @@ -2,36 +2,33 @@ === `index_options` The `index_options` parameter controls what information is added to the -inverted index, for search and highlighting purposes. It accepts the -following settings: +inverted index for search and highlighting purposes. -[horizontal] -`docs`:: - - Only the doc number is indexed. Can answer the question _Does this term - exist in this field?_ +[WARNING] +==== +The `index_options` parameter is intended for use with <> fields +only.
Avoid using `index_options` with other field datatypes. +==== -`freqs`:: +It accepts the following values: - Doc number and term frequencies are indexed. Term frequencies are used to - score repeated terms higher than single terms. +`docs`:: +Only the doc number is indexed. Can answer the question _Does this term +exist in this field?_ -`positions`:: +`freqs`:: +Doc number and term frequencies are indexed. Term frequencies are used to +score repeated terms higher than single terms. - Doc number, term frequencies, and term positions (or order) are indexed. - Positions can be used for - <>. +`positions` (default):: +Doc number, term frequencies, and term positions (or order) are indexed. +Positions can be used for +<>. `offsets`:: - - Doc number, term frequencies, positions, and start and end character - offsets (which map the term back to the original string) are indexed. - Offsets are used by the <> to speed up highlighting. - -NOTE: <> don't support the `index_options` parameter any longer. - -<> fields use `positions` as the default, and all other fields use -`docs` as the default. +Doc number, term frequencies, positions, and start and end character +offsets (which map the term back to the original string) are indexed. +Offsets are used by the <> to speed up highlighting. [source,console] -------------------------------------------------- diff --git a/docs/reference/mapping/types.asciidoc b/docs/reference/mapping/types.asciidoc index 1472d1bf641eb..1b5afad31f0c6 100644 --- a/docs/reference/mapping/types.asciidoc +++ b/docs/reference/mapping/types.asciidoc @@ -57,6 +57,8 @@ string:: <> and <> <>:: `histogram` for pre-aggregated numerical values for percentiles aggregations. +<>:: Specialization of `keyword` for the case when all documents have the same value. + [float] [[types-array-handling]] === Arrays @@ -126,4 +128,6 @@ include::types/text.asciidoc[] include::types/token-count.asciidoc[] -include::types/shape.asciidoc[] \ No newline at end of file +include::types/shape.asciidoc[] + +include::types/constant-keyword.asciidoc[] diff --git a/docs/reference/mapping/types/constant-keyword.asciidoc b/docs/reference/mapping/types/constant-keyword.asciidoc new file mode 100644 index 0000000000000..3f9008d265d25 --- /dev/null +++ b/docs/reference/mapping/types/constant-keyword.asciidoc @@ -0,0 +1,85 @@ +[role="xpack"] +[testenv="basic"] + +[[constant-keyword]] +=== Constant keyword datatype +++++ +Constant keyword +++++ + +Constant keyword is a specialization of the <> field for +the case that all documents in the index have the same value. + +[source,console] +-------------------------------- +PUT logs-debug +{ + "mappings": { + "properties": { + "@timestamp": { + "type": "date" + }, + "message": { + "type": "text" + }, + "level": { + "type": "constant_keyword", + "value": "debug" + } + } + } +} +-------------------------------- + +`constant_keyword` supports the same queries and aggregations as `keyword` +fields do, but takes advantage of the fact that all documents have the same +value per index to execute queries more efficiently. + +You can submit documents that either have no value for the field or have a +value equal to the one configured in the mappings.
The two indexing requests below are equivalent: + +[source,console] +-------------------------------- +POST logs-debug/_doc +{ + "@timestamp": "2019-12-12", + "message": "Starting up Elasticsearch", + "level": "debug" +} + +POST logs-debug/_doc +{ + "@timestamp": "2019-12-12", + "message": "Starting up Elasticsearch" +} +-------------------------------- +//TEST[continued] + +However, providing a value that differs from the one configured in the +mapping is disallowed. + +If no `value` is provided in the mappings, the field automatically configures +itself based on the value contained in the first indexed document. While this +behavior can be convenient, note that a single poisonous document with a wrong +value can cause all other documents to be rejected. + +The `value` of the field cannot be changed after it has been set. + +[[constant-keyword-params]] +==== Parameters for constant keyword fields + +The following mapping parameters are accepted: + +[horizontal] + +<>:: + + Metadata about the field. + +`value`:: + + The value to associate with all documents in the index. If this parameter + is not provided, it is set based on the first document that gets indexed. + diff --git a/docs/reference/ml/anomaly-detection/apis/get-datafeed-stats.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-datafeed-stats.asciidoc index 463d17771cc2c..d290946c99574 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-datafeed-stats.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-datafeed-stats.asciidoc @@ -69,7 +69,7 @@ informational; you cannot update their values. `assignment_explanation`:: (string) -include::{docdir}/ml/ml-shared.asciidoc[tag=assignment-explanation] +include::{docdir}/ml/ml-shared.asciidoc[tag=assignment-explanation-datafeeds] `datafeed_id`:: (string) @@ -77,10 +77,16 @@ include::{docdir}/ml/ml-shared.asciidoc[tag=datafeed-id] `node`:: (object) -include::{docdir}/ml/ml-shared.asciidoc[tag=node] -`node`.`id`::: The unique identifier of the node. For example, "0-o0tOoRTwKFZifatTWKNw". +include::{docdir}/ml/ml-shared.asciidoc[tag=node-datafeeds] + +`node`.`id`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=node-id] + `node`.`name`::: The node name. For example, `0-o0tOo`. -`node`.`ephemeral_id`::: The node ephemeral ID. + +`node`.`ephemeral_id`::: +include::{docdir}/ml/ml-shared.asciidoc[tag=node-ephemeral-id] + `node`.`transport_address`::: The host and port where transport HTTP connections are accepted. For example, `127.0.0.1:9300`. `node`.`attributes`::: For example, `{"ml.machine_memory": "17179869184"}`. diff --git a/docs/reference/ml/anomaly-detection/apis/get-job-stats.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-job-stats.asciidoc index 0afed42ca98f1..ea06bb512de2f 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-job-stats.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-job-stats.asciidoc @@ -57,8 +57,8 @@ The API returns the following information about the operational progress of a job: `assignment_explanation`:: -(string) For open jobs only, contains messages relating to the selection -of a node to run the job. +(string) +include::{docdir}/ml/ml-shared.asciidoc[tag=assignment-explanation-anomaly-jobs] [[datacounts]]`data_counts`:: (object) An object that describes the quantity of input to the job and any @@ -67,85 +67,73 @@ a job. If a model snapshot is reverted or old results are deleted, the job counts are not reset. `data_counts`.`bucket_count`::: -(long) The number of bucket results produced by the job.
+(long) +include::{docdir}/ml/ml-shared.asciidoc[tag=bucket-count-anomaly-jobs] `data_counts`.`earliest_record_timestamp`::: -(date) The timestamp of the earliest chronologically input document. +(date) +include::{docdir}/ml/ml-shared.asciidoc[tag=earliest-record-timestamp] `data_counts`.`empty_bucket_count`::: -(long) The number of buckets which did not contain any data. If your data -contains many empty buckets, consider increasing your `bucket_span` or using -functions that are tolerant to gaps in data such as `mean`, `non_null_sum` or -`non_zero_count`. +(long) +include::{docdir}/ml/ml-shared.asciidoc[tag=empty-bucket-count] `data_counts`.`input_bytes`::: -(long) The number of bytes of input data posted to the job. +(long) +include::{docdir}/ml/ml-shared.asciidoc[tag=input-bytes] `data_counts`.`input_field_count`::: -(long) The total number of fields in input documents posted to the job. This -count includes fields that are not used in the analysis. However, be aware that -if you are using a {dfeed}, it extracts only the required fields from the -documents it retrieves before posting them to the job. +(long) +include::{docdir}/ml/ml-shared.asciidoc[tag=input-field-count] `data_counts`.`input_record_count`::: -(long) The number of input documents posted to the job. +(long) +include::{docdir}/ml/ml-shared.asciidoc[tag=input-record-count] `data_counts`.`invalid_date_count`::: -(long) The number of input documents with either a missing date field or a date -that could not be parsed. +(long) +include::{docdir}/ml/ml-shared.asciidoc[tag=invalid-date-count] `data_counts`.`job_id`::: (string) include::{docdir}/ml/ml-shared.asciidoc[tag=job-id-anomaly-detection] `data_counts`.`last_data_time`::: -(date) The timestamp at which data was last analyzed, according to server time. +(date) +include::{docdir}/ml/ml-shared.asciidoc[tag=last-data-time] `data_counts`.`latest_empty_bucket_timestamp`::: -(date) The timestamp of the last bucket that did not contain any data. +(date) +include::{docdir}/ml/ml-shared.asciidoc[tag=latest-empty-bucket-timestamp] `data_counts`.`latest_record_timestamp`::: -(date) The timestamp of the latest chronologically input document. +(date) +include::{docdir}/ml/ml-shared.asciidoc[tag=latest-record-timestamp] `data_counts`.`latest_sparse_bucket_timestamp`::: -(date) The timestamp of the last bucket that was considered sparse. +(date) +include::{docdir}/ml/ml-shared.asciidoc[tag=latest-sparse-record-timestamp] `data_counts`.`missing_field_count`::: -(long) The number of input documents that are missing a field that the job is -configured to analyze. Input documents with missing fields are still processed -because it is possible that not all fields are missing. The value of -`processed_record_count` includes this count. +(long) +include::{docdir}/ml/ml-shared.asciidoc[tag=missing-field-count] + --- -NOTE: If you are using {dfeeds} or posting data to the job in JSON format, a -high `missing_field_count` is often not an indication of data issues. It is not -necessarily a cause for concern. - --- +The value of `processed_record_count` includes this count. `data_counts`.`out_of_order_timestamp_count`::: -(long) The number of input documents that are out of time sequence and outside -of the latency window. This information is applicable only when you provide data -to the job by using the <>. These out of order -documents are discarded, since jobs require time series data to be in ascending -chronological order. 
+(long)
+include::{docdir}/ml/ml-shared.asciidoc[tag=out-of-order-timestamp-count]
`data_counts`.`processed_field_count`:::
-(long) The total number of fields in all the documents that have been processed
-by the job. Only fields that are specified in the detector configuration object
-contribute to this count. The timestamp is not included in this count.
+(long)
+include::{docdir}/ml/ml-shared.asciidoc[tag=processed-field-count]
`data_counts`.`processed_record_count`:::
-(long) The number of input documents that have been processed by the job. This
-value includes documents with missing fields, since they are nonetheless
-analyzed. If you use {dfeeds} and have aggregations in your search query, the
-`processed_record_count` will be the number of aggregation results processed,
-not the number of {es} documents.
+(long)
+include::{docdir}/ml/ml-shared.asciidoc[tag=processed-record-count]
`data_counts`.`sparse_bucket_count`:::
-(long) The number of buckets that contained few data points compared to the
-expected number of data points. If your data contains many sparse buckets,
-consider using a longer `bucket_span`.
+(long)
+include::{docdir}/ml/ml-shared.asciidoc[tag=sparse-bucket-count]
[[forecastsstats]]`forecasts_stats`::
(object) An object that provides statistical information about forecasts
@@ -167,8 +155,9 @@ value of `1` indicates that at least one forecast exists.
related to this job. If there are no forecasts, this property is omitted.
`forecasts_stats`.`records`:::
-(object) The `avg`, `min`, `max` and `total` number of model_forecast documents
-written for forecasts related to this job. If there are no forecasts, this property is omitted.
+(object) The `avg`, `min`, `max` and `total` number of `model_forecast` documents
+written for forecasts related to this job. If there are no forecasts, this
+property is omitted.
`forecasts_stats`.`processing_time_ms`:::
(object) The `avg`, `min`, `max` and `total` runtime in milliseconds for
@@ -179,8 +168,8 @@ forecasts related to this job. If there are no forecasts, this property is omitt
{"finished" : 2, "started" : 1}. If there are no forecasts, this property is
omitted.
`forecasts_stats`.`total`:::
-(long) The number of individual forecasts currently available for this job. A
-value of `1` or more indicates that forecasts exist.
+(long)
+include::{docdir}/ml/ml-shared.asciidoc[tag=forecast-total]
`job_id`::
(string)
@@ -191,38 +180,24 @@ include::{docdir}/ml/ml-shared.asciidoc[tag=job-id-anomaly-detection]
model. It has the following properties:
`model_size_stats`.`bucket_allocation_failures_count`:::
-(long) The number of buckets for which new entities in incoming data were not
-processed due to insufficient model memory. This situation is also signified
-by a `hard_limit: memory_status` property value.
+(long)
+include::{docdir}/ml/ml-shared.asciidoc[tag=bucket-allocation-failures-count]
`model_size_stats`.`categorized_doc_count`:::
-(long) The number of documents that have had a field categorized.
+(long)
+include::{docdir}/ml/ml-shared.asciidoc[tag=categorized-doc-count]
`model_size_stats`.`categorization_status`:::
-(string) The status of categorization for this job.
-Contains one of the following values.
-+
---
-* `ok`: Categorization is performing acceptably well (or not being
-used at all).
-* `warn`: Categorization is detecting a distribution of categories
-that suggests the input data is inappropriate for categorization.
-Problems could be that there is only one category, more than 90% of -categories are rare, the number of categories is greater than 50% of -the number of categorized documents, there are no frequently -matched categories, or more than 50% of categories are dead. - --- +(string) +include::{docdir}/ml/ml-shared.asciidoc[tag=categorization-status] `model_size_stats`.`dead_category_count`::: -(long) The number of categories created by categorization that will -never be assigned again because another category's definition -makes it a superset of the dead category. (Dead categories are a -side effect of the way categorization has no prior training.) +(long) +include::{docdir}/ml/ml-shared.asciidoc[tag=dead-category-count] `model_size_stats`.`frequent_category_count`::: -(long) The number of categories that match more than 1% of categorized -documents. +(long) +include::{docdir}/ml/ml-shared.asciidoc[tag=frequent-category-count] `model_size_stats`.`job_id`::: (string) @@ -232,53 +207,47 @@ include::{docdir}/ml/ml-shared.asciidoc[tag=job-id-anomaly-detection] (date) The timestamp of the `model_size_stats` according to server time. `model_size_stats`.`memory_status`::: -(string) The status of the mathematical models. This property can have one of -the following values: -+ --- -* `ok`: The models stayed below the configured value. -* `soft_limit`: The models used more than 60% of the configured memory limit -and older unused models will be pruned to free up space. -* `hard_limit`: The models used more space than the configured memory limit. -As a result, not all incoming data was processed. --- +(string) +include::{docdir}/ml/ml-shared.asciidoc[tag=model-memory-status] `model_size_stats`.`model_bytes`::: -(long) The number of bytes of memory used by the models. This is the maximum -value since the last time the model was persisted. If the job is closed, -this value indicates the latest size. +(long) +include::{docdir}/ml/ml-shared.asciidoc[tag=model-bytes] `model_size_stats`.`model_bytes_exceeded`::: - (long) The number of bytes over the high limit for memory usage at the last - allocation failure. +(long) +include::{docdir}/ml/ml-shared.asciidoc[tag=model-bytes-exceeded] `model_size_stats`.`model_bytes_memory_limit`::: -(long) The upper limit for memory usage, checked on increasing values. +(long) +include::{docdir}/ml/ml-shared.asciidoc[tag=model-memory-limit-anomaly-jobs] `model_size_stats`.`rare_category_count`::: -(long) The number of categories that match just one categorized document. +(long) +include::{docdir}/ml/ml-shared.asciidoc[tag=rare-category-count] `model_size_stats`.`result_type`::: (string) For internal use. The type of result. `model_size_stats`.`total_by_field_count`::: -(long) The number of `by` field values that were analyzed by the models. This -value is cumulative for all detectors. +(long) +include::{docdir}/ml/ml-shared.asciidoc[tag=total-by-field-count] `model_size_stats`.`total_category_count`::: -(long) The number of categories created by categorization. +(long) +include::{docdir}/ml/ml-shared.asciidoc[tag=total-category-count] `model_size_stats`.`total_over_field_count`::: -(long) The number of `over` field values that were analyzed by the models. This -value is cumulative for all detectors. +(long) +include::{docdir}/ml/ml-shared.asciidoc[tag=total-over-field-count] `model_size_stats`.`total_partition_field_count`::: -(long) The number of `partition` field values that were analyzed by the models. -This value is cumulative for all detectors. 
+(long) +include::{docdir}/ml/ml-shared.asciidoc[tag=total-partition-field-count] `model_size_stats`.`timestamp`::: -(date) The timestamp of the `model_size_stats` according to the timestamp of the -data. +(date) +include::{docdir}/ml/ml-shared.asciidoc[tag=model-timestamp] [[stats-node]]`node`:: (object) Contains properties for the node that runs the job. This information is @@ -289,10 +258,12 @@ available only for open jobs. `{"ml.machine_memory": "17179869184", "ml.max_open_jobs" : "20"}`. `node`.`ephemeral_id`::: -(string) The ephemeral ID of the node. +(string) +include::{docdir}/ml/ml-shared.asciidoc[tag=node-ephemeral-id] `node`.`id`::: -(string) The unique identifier of the node. +(string) +include::{docdir}/ml/ml-shared.asciidoc[tag=node-id] `node`.`name`::: (string) The node name. @@ -301,26 +272,12 @@ available only for open jobs. (string) The host and port where transport HTTP connections are accepted. `open_time`:: -(string) For open jobs only, the elapsed time for which the job has been open. -For example, `28746386s`. +(string) +include::{docdir}/ml/ml-shared.asciidoc[tag=open-time] `state`:: -(string) The status of the job, which can be one of the following values: -+ --- -* `closed`: The job finished successfully with its model state persisted. The -job must be opened before it can accept further data. -* `closing`: The job close action is in progress and has not yet completed. A -closing job cannot accept further data. -* `failed`: The job did not finish successfully due to an error. This situation -can occur due to invalid input data, a fatal error occurring during the analysis, -or an external interaction such as the process being killed by the Linux out of -memory (OOM) killer. If the job had irrevocably failed, it must be force closed -and then deleted. If the {dfeed} can be corrected, the job can be closed and -then re-opened. -* `opened`: The job is available to receive and process data. -* `opening`: The job open action is in progress and has not yet completed. --- +(string) +include::{docdir}/ml/ml-shared.asciidoc[tag=state-anomaly-job] [[timingstats]]`timing_stats`:: (object) An object that provides statistical information about timing aspect of @@ -330,28 +287,32 @@ this job. It has the following properties: (double) Average of all bucket processing times in milliseconds. `timing_stats`.`bucket_count`::: -(long) The number of buckets processed. +(long) +include::{docdir}/ml/ml-shared.asciidoc[tag=bucket-count] `timing_stats`.`exponential_average_bucket_processing_time_ms`::: -(double) Exponential moving average of all bucket processing times in -milliseconds. +(double) +include::{docdir}/ml/ml-shared.asciidoc[tag=bucket-time-exponential-average] `timing_stats`.`exponential_average_bucket_processing_time_per_hour_ms`::: -(double) Exponentially-weighted moving average of bucket processing times -calculated in a 1 hour time window. +(double) +include::{docdir}/ml/ml-shared.asciidoc[tag=bucket-time-exponential-average-hour] `timing_stats`.`job_id`::: (string) include::{docdir}/ml/ml-shared.asciidoc[tag=job-id-anomaly-detection] `timing_stats`.`maximum_bucket_processing_time_ms`::: -(double) Maximum among all bucket processing times in milliseconds. +(double) +include::{docdir}/ml/ml-shared.asciidoc[tag=bucket-time-maximum] `timing_stats`.`minimum_bucket_processing_time_ms`::: -(double) Minimum among all bucket processing times in milliseconds. 
+(double) +include::{docdir}/ml/ml-shared.asciidoc[tag=bucket-time-minimum] `timing_stats`.`total_bucket_processing_time_ms`::: -(double) Sum of all bucket processing times in milliseconds. +(double) +include::{docdir}/ml/ml-shared.asciidoc[tag=bucket-time-total] [[ml-get-job-stats-response-codes]] ==== {api-response-codes-title} diff --git a/docs/reference/ml/ml-shared.asciidoc b/docs/reference/ml/ml-shared.asciidoc index dbd1c0ef7fa4d..351557c40afa7 100644 --- a/docs/reference/ml/ml-shared.asciidoc +++ b/docs/reference/ml/ml-shared.asciidoc @@ -137,9 +137,14 @@ tag::analyzed-fields-includes[] An array of strings that defines the fields that will be included in the analysis. end::analyzed-fields-includes[] -tag::assignment-explanation[] +tag::assignment-explanation-anomaly-jobs[] +For open {anomaly-jobs} only, contains messages relating to the selection +of a node to run the job. +end::assignment-explanation-anomaly-jobs[] + +tag::assignment-explanation-datafeeds[] For started {dfeeds} only, contains messages relating to the selection of a node. -end::assignment-explanation[] +end::assignment-explanation-datafeeds[] tag::assignment-explanation-dfanalytics[] Contains messages relating to the selection of a node. @@ -158,10 +163,20 @@ so do not set the `background_persist_interval` value too low. -- end::background-persist-interval[] +tag::bucket-allocation-failures-count[] +The number of buckets for which new entities in incoming data were not processed +due to insufficient model memory. This situation is also signified by a +`hard_limit: memory_status` property value. +end::bucket-allocation-failures-count[] + tag::bucket-count[] The number of buckets processed. end::bucket-count[] +tag::bucket-count-anomaly-jobs[] +The number of bucket results produced by the job. +end::bucket-count-anomaly-jobs[] + tag::bucket-span[] The size of the interval that the analysis is aggregated into, typically between `5m` and `1h`. The default value is `5m`. If the {anomaly-job} uses a {dfeed} @@ -175,6 +190,27 @@ The length of the bucket in seconds. This value matches the `bucket_span` that is specified in the job. end::bucket-span-results[] +tag::bucket-time-exponential-average[] +Exponential moving average of all bucket processing times, in milliseconds. +end::bucket-time-exponential-average[] + +tag::bucket-time-exponential-average-hour[] +Exponentially-weighted moving average of bucket processing times +calculated in a 1 hour time window, in milliseconds. +end::bucket-time-exponential-average-hour[] + +tag::bucket-time-maximum[] +Maximum among all bucket processing times, in milliseconds. +end::bucket-time-maximum[] + +tag::bucket-time-minimum[] +Minimum among all bucket processing times, in milliseconds. +end::bucket-time-minimum[] + +tag::bucket-time-total[] +Sum of all bucket processing times, in milliseconds. +end::bucket-time-total[] + tag::by-field-name[] The field used to split the data. In particular, this property is used for analyzing the splits with respect to their own history. It is used for finding @@ -252,6 +288,24 @@ customize the tokenizer or post-tokenization filtering, use the `pattern_replace` character filters. The effect is exactly the same. end::categorization-filters[] +tag::categorization-status[] +The status of categorization for the job. Contains one of the following values: ++ +-- +* `ok`: Categorization is performing acceptably well (or not being used at all). 
+* `warn`: Categorization is detecting a distribution of categories that suggests +the input data is inappropriate for categorization. Problems could be that there +is only one category, more than 90% of categories are rare, the number of +categories is greater than 50% of the number of categorized documents, there are +no frequently matched categories, or more than 50% of categories are dead. + +-- +end::categorization-status[] + +tag::categorized-doc-count[] +The number of documents that have had a field categorized. +end::categorized-doc-count[] + tag::char-filter[] One or more <>. In addition to the built-in character filters, other plugins can provide more character filters. @@ -263,7 +317,6 @@ add them here as <>. end::char-filter[] - tag::compute-feature-influence[] If `true`, the feature influence calculation is enabled. Defaults to `true`. end::compute-feature-influence[] @@ -451,13 +504,25 @@ sorted by the `id` value in ascending order. `progress`::: (array) The progress report of the {dfanalytics-job} by phase. -`phase`::: +`phase`:::: (string) Defines the phase of the {dfanalytics-job}. Possible phases: `reindexing`, `loading_data`, `analyzing`, and `writing_results`. -`progress_percent`::: +`progress_percent`:::: (integer) The progress that the {dfanalytics-job} has made expressed in percentage. + +`memory_usage`::: +(Optional, Object) An object describing memory usage of the analytics. +It will be present only after the job has started and memory usage has +been reported. + +`timestamp`:::: +(date) The timestamp when memory usage was calculated. + +`peak_usage_bytes`:::: +(long) The number of bytes used at the highest peak of memory usage. + end::data-frame-analytics-stats[] tag::datafeed-id[] @@ -472,6 +537,13 @@ Identifier for the {dfeed}. It can be a {dfeed} identifier or a wildcard expression. end::datafeed-id-wildcard[] +tag::dead-category-count[] +The number of categories created by categorization that will never be assigned +again because another category's definition makes it a superset of the dead +category. (Dead categories are a side effect of the way categorization has no +prior training.) +end::dead-category-count[] + tag::decompress-definition[] Specifies whether the included model definition should be returned as a JSON map (`true`) or in a custom compressed format (`false`). Defaults to `true`. @@ -552,6 +624,17 @@ A unique identifier for the detector. This identifier is based on the order of the detectors in the `analysis_config`, starting at zero. end::detector-index[] +tag::earliest-record-timestamp[] +The timestamp of the earliest chronologically input document. +end::earliest-record-timestamp[] + +tag::empty-bucket-count[] +The number of buckets which did not contain any data. If your data +contains many empty buckets, consider increasing your `bucket_span` or using +functions that are tolerant to gaps in data such as `mean`, `non_null_sum` or +`non_zero_count`. +end::empty-bucket-count[] + tag::eta[] Advanced configuration option. The shrinkage applied to the weights. Smaller values result in larger forests which have better generalization error. However, @@ -618,6 +701,11 @@ tag::filter-id[] A string that uniquely identifies a filter. end::filter-id[] +tag::forecast-total[] +The number of individual forecasts currently available for the job. A value of +`1` or more indicates that forecasts exist. +end::forecast-total[] + tag::frequency[] The interval at which scheduled queries are made while the {dfeed} runs in real time. 
The default value is either the bucket span for short bucket spans, or, @@ -628,6 +716,10 @@ bucket results. If the {dfeed} uses aggregations, this value must be divisible by the interval of the date histogram aggregation. end::frequency[] +tag::frequent-category-count[] +The number of categories that match more than 1% of categorized documents. +end::frequent-category-count[] + tag::from[] Skips the specified number of {dfanalytics-jobs}. The default value is `0`. end::from[] @@ -688,6 +780,26 @@ is available as part of the input data. When you use multiple detectors, the use of influencers is recommended as it aggregates results for each influencer entity. end::influencers[] +tag::input-bytes[] +The number of bytes of input data posted to the {anomaly-job}. +end::input-bytes[] + +tag::input-field-count[] +The total number of fields in input documents posted to the {anomaly-job}. This +count includes fields that are not used in the analysis. However, be aware that +if you are using a {dfeed}, it extracts only the required fields from the +documents it retrieves before posting them to the job. +end::input-field-count[] + +tag::input-record-count[] +The number of input documents posted to the {anomaly-job}. +end::input-record-count[] + +tag::invalid-date-count[] +The number of input documents with either a missing date field or a date that +could not be parsed. +end::invalid-date-count[] + tag::is-interim[] If `true`, this is an interim result. In other words, the results are calculated based on partial input data. @@ -753,6 +865,10 @@ relevant relationships between the features and the {depvar}. The smaller this parameter the larger individual trees will be and the longer train will take. end::lambda[] +tag::last-data-time[] +The timestamp at which data was last analyzed, according to server time. +end::last-data-time[] + tag::latency[] The size of the window in which to expect data that is out of time order. The default value is 0 (no latency). If you specify a non-zero value, it must be @@ -766,6 +882,18 @@ the <> API. -- end::latency[] +tag::latest-empty-bucket-timestamp[] +The timestamp of the last bucket that did not contain any data. +end::latest-empty-bucket-timestamp[] + +tag::latest-record-timestamp[] +The timestamp of the latest chronologically input document. +end::latest-record-timestamp[] + +tag::latest-sparse-record-timestamp[] +The timestamp of the last bucket that was considered sparse. +end::latest-sparse-record-timestamp[] + tag::max-empty-searches[] If a real-time {dfeed} has never seen any data (including during any initial training period) then it will automatically stop itself and close its associated @@ -803,6 +931,19 @@ ensemble method. Available methods are `lof`, `ldof`, `distance_kth_nn`, `distance_knn`. end::method[] +tag::missing-field-count[] +The number of input documents that are missing a field that the {anomaly-job} is +configured to analyze. Input documents with missing fields are still processed +because it is possible that not all fields are missing. ++ +-- +NOTE: If you are using {dfeeds} or posting data to the job in JSON format, a +high `missing_field_count` is often not an indication of data issues. It is not +necessarily a cause for concern. + +-- +end::missing-field-count[] + tag::mode[] There are three available modes: + @@ -814,6 +955,17 @@ recommended value. -- end::mode[] +tag::model-bytes[] +The number of bytes of memory used by the models. This is the maximum value +since the last time the model was persisted. 
If the job is closed, this value +indicates the latest size. +end::model-bytes[] + +tag::model-bytes-exceeded[] +The number of bytes over the high limit for memory usage at the last allocation +failure. +end::model-bytes-exceeded[] + tag::model-id[] The unique identifier of the trained {infer} model. end::model-id[] @@ -843,6 +995,10 @@ see <>. -- end::model-memory-limit[] +tag::model-memory-limit-anomaly-jobs[] +The upper limit for model memory usage, checked on increasing values. +end::model-memory-limit-anomaly-jobs[] + tag::model-memory-limit-dfa[] The approximate maximum amount of memory resources that are permitted for analytical processing. The default value for {dfanalytics-jobs} is `1gb`. If @@ -852,6 +1008,19 @@ setting, an error occurs when you try to create {dfanalytics-jobs} that have <>. end::model-memory-limit-dfa[] +tag::model-memory-status[] +The status of the mathematical models, which can have one of the following +values: ++ +-- +* `ok`: The models stayed below the configured value. +* `soft_limit`: The models used more than 60% of the configured memory limit +and older unused models will be pruned to free up space. +* `hard_limit`: The models used more space than the configured memory limit. +As a result, not all incoming data was processed. +-- +end::model-memory-status[] + tag::model-plot-config[] This advanced configuration option stores model information along with the results. It provides a more detailed view into {anomaly-detect}. @@ -894,6 +1063,10 @@ The default value is `1`, which means snapshots that are one day (twenty-four ho older than the newest snapshot are deleted. end::model-snapshot-retention-days[] +tag::model-timestamp[] +The timestamp of the last record when the model stats were gathered. +end::model-timestamp[] + tag::multivariate-by-fields[] This functionality is reserved for internal use. It is not supported for use in customer environments and is not subject to the support SLA of official GA @@ -924,10 +1097,27 @@ improve diversity in the ensemble. Therefore, only override this if you are confident that the value you choose is appropriate for the data set. end::n-neighbors[] -tag::node[] +tag::node-address[] +The network address of the node. +end::node-address[] + +tag::node-datafeeds[] For started {dfeeds} only, this information pertains to the node upon which the {dfeed} is started. -end::node[] +end::node-datafeeds[] + +tag::node-ephemeral-id[] +The ephemeral ID of the node. +end::node-ephemeral-id[] + +tag::node-id[] +The unique identifier of the node. +end::node-id[] + +tag::node-jobs[] +Contains properties for the node that runs the job. This information is +available only for open jobs. +end::node-jobs[] tag::num-top-classes[] Defines the number of categories for which the predicted @@ -936,12 +1126,17 @@ total number of categories (in the {version} version of the {stack}, it's two) to predict then we will report all category probabilities. Defaults to 2. end::num-top-classes[] -tag::over-field-name[] -The field used to split the data. In particular, this property is used for -analyzing the splits with respect to the history of all splits. It is used for -finding unusual values in the population of all splits. For more information, -see {ml-docs}/ml-configuring-pop.html[Performing population analysis]. -end::over-field-name[] +tag::open-time[] +For open jobs only, the elapsed time for which the job has been open. 
+end::open-time[] + +tag::out-of-order-timestamp-count[] +The number of input documents that are out of time sequence and outside +of the latency window. This information is applicable only when you provide data +to the {anomaly-job} by using the <>. These out of +order documents are discarded, since jobs require time series data to be in +ascending chronological order. +end::out-of-order-timestamp-count[] tag::outlier-fraction[] Sets the proportion of the data set that is assumed to be outlying prior to @@ -949,6 +1144,13 @@ Sets the proportion of the data set that is assumed to be outlying prior to outliers and 95% are inliers. end::outlier-fraction[] +tag::over-field-name[] +The field used to split the data. In particular, this property is used for +analyzing the splits with respect to the history of all splits. It is used for +finding unusual values in the population of all splits. For more information, +see {ml-docs}/ml-configuring-pop.html[Performing population analysis]. +end::over-field-name[] + tag::partition-field-name[] The field used to segment the analysis. When you use this property, you have completely independent baselines for each value of this field. @@ -959,6 +1161,20 @@ Defines the name of the prediction field in the results. Defaults to `_prediction`. end::prediction-field-name[] +tag::processed-field-count[] +The total number of fields in all the documents that have been processed by the +{anomaly-job}. Only fields that are specified in the detector configuration +object contribute to this count. The timestamp is not included in this count. +end::processed-field-count[] + +tag::processed-record-count[] +The number of input documents that have been processed by the {anomaly-job}. +This value includes documents with missing fields, since they are nonetheless +analyzed. If you use {dfeeds} and have aggregations in your search query, the +`processed_record_count` is the number of aggregation results processed, not the +number of {es} documents. +end::processed-record-count[] + tag::randomize-seed[] Defines the seed to the random generator that is used to pick which documents will be used for training. By default it is randomly generated. Set it to a @@ -983,6 +1199,10 @@ multiple jobs running on the same node. For more information, see {ml-docs}/ml-delayed-data-detection.html[Handling delayed data]. end::query-delay[] +tag::rare-category-count[] +The number of categories that match just one categorized document. +end::rare-category-count[] + tag::renormalization-window-days[] Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen. The default value is the longer of 30 days or @@ -1076,6 +1296,12 @@ The configuration of how to source the analysis data. It requires an excluded from the destination. end::source-put-dfa[] +tag::sparse-bucket-count[] +The number of buckets that contained few data points compared to the expected +number of data points. If your data contains many sparse buckets, consider using +a longer `bucket_span`. +end::sparse-bucket-count[] + tag::standardization-enabled[] If `true`, then the following operation is performed on the columns before computing outlier scores: (x_i - mean(x_i)) / sd(x_i). Defaults to `true`. For @@ -1083,6 +1309,25 @@ more information, see https://en.wikipedia.org/wiki/Feature_scaling#Standardization_(Z-score_Normalization)[this wiki page about standardization]. 
end::standardization-enabled[]
+tag::state-anomaly-job[]
+The status of the {anomaly-job}, which can be one of the following values:
++
+--
+* `closed`: The job finished successfully with its model state persisted. The
+job must be opened before it can accept further data.
+* `closing`: The job close action is in progress and has not yet completed. A
+closing job cannot accept further data.
+* `failed`: The job did not finish successfully due to an error. This situation
+can occur due to invalid input data, a fatal error occurring during the analysis,
+or an external interaction such as the process being killed by the Linux out of
+memory (OOM) killer. If the job has irrevocably failed, it must be force closed
+and then deleted. If the {dfeed} can be corrected, the job can be closed and
+then re-opened.
+* `opened`: The job is available to receive and process data.
+* `opening`: The job open action is in progress and has not yet completed.
+--
+end::state-anomaly-job[]
+
tag::state-datafeed[]
The status of the {dfeed}, which can be one of the following values:
+
@@ -1158,6 +1403,25 @@ that tokenizer but change the character or token filters, specify
`"tokenizer": "ml_classic"` in your `categorization_analyzer`.
end::tokenizer[]
+tag::total-by-field-count[]
+The number of `by` field values that were analyzed by the models. This value is
+cumulative for all detectors in the job.
+end::total-by-field-count[]
+
+tag::total-category-count[]
+The number of categories created by categorization.
+end::total-category-count[]
+
+tag::total-over-field-count[]
+The number of `over` field values that were analyzed by the models. This value
+is cumulative for all detectors in the job.
+end::total-over-field-count[]
+
+tag::total-partition-field-count[]
+The number of `partition` field values that were analyzed by the models. This
+value is cumulative for all detectors in the job.
+end::total-partition-field-count[]
+
tag::training-percent[]
Defines what percentage of the eligible documents that will be used for
training. Documents that are ignored by the analysis (for example
diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc
index 679a70a9424db..9e6fe18d754d0 100644
--- a/docs/reference/redirects.asciidoc
+++ b/docs/reference/redirects.asciidoc
@@ -278,6 +278,16 @@ See <>, <>, <>,
This page was deleted.
See <>.
+[role="exclude",id="data-frames-settings"]
+=== {transforms-cap} settings in Elasticsearch
+
+See <>.
+
+[role="exclude",id="general-data-frames-settings"]
+==== General {transforms} settings
+
+See <>.
+
[role="exclude",id="ml-results-resource"]
=== Results resources
diff --git a/docs/reference/rest-api/info.asciidoc b/docs/reference/rest-api/info.asciidoc
index 044ca958450d3..441dc8f3478a5 100644
--- a/docs/reference/rest-api/info.asciidoc
+++ b/docs/reference/rest-api/info.asciidoc
@@ -71,6 +71,10 @@ Example response:
"available" : true,
"enabled" : true
},
+ "constant_keyword" : {
+ "available" : true,
+ "enabled" : true
+ },
"enrich" : {
"available" : true,
"enabled" : true
diff --git a/docs/reference/settings/data-frames-settings.asciidoc b/docs/reference/settings/transform-settings.asciidoc
similarity index 63%
rename from docs/reference/settings/data-frames-settings.asciidoc
rename to docs/reference/settings/transform-settings.asciidoc
index a4568ae3b88df..6f60205951ea1 100644
--- a/docs/reference/settings/data-frames-settings.asciidoc
+++ b/docs/reference/settings/transform-settings.asciidoc
@@ -1,6 +1,6 @@
[role="xpack"]
-[[data-frames-settings]]
+[[transform-settings]]
=== {transforms-cap} settings in Elasticsearch
[subs="attributes"]
++++
@@ -9,17 +9,30 @@
You do not need to configure any settings to use {transforms}. It is enabled by
default.
-All of these settings can be added to the `elasticsearch.yml` configuration file.
-The dynamic settings can also be updated across a cluster with the
+All of these settings can be added to the `elasticsearch.yml` configuration file.
+The dynamic settings can also be updated across a cluster with the
<>.
-TIP: Dynamic settings take precedence over settings in the `elasticsearch.yml`
+TIP: Dynamic settings take precedence over settings in the `elasticsearch.yml`
file.
[float]
-[[general-data-frames-settings]]
+[[general-transform-settings]]
==== General {transforms} settings
+`node.transform`::
+Set to `true` to identify the node as a _transform node_. The default is `false` if
+either `node.data` or `xpack.transform.enabled` is `false` for the node, and `true` otherwise.
+
++
+If set to `false` in `elasticsearch.yml`, the node cannot run transforms. If set to
+`true` but `xpack.transform.enabled` is set to `false`, the `node.transform` setting is
+ignored and the node cannot run transforms. If you want to run transforms, there must be at
+least one transform node in your cluster.
+
++
+IMPORTANT: Use the `node.transform` setting, rather than `xpack.transform.enabled`, to
+constrain the execution of transforms to certain nodes. On dedicated
+coordinating nodes or dedicated master nodes, disable the `node.transform` role.
+
`xpack.transform.enabled`::
Set to `true` (default) to enable {transforms} on the node.
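+
+As an illustration (the values here are hypothetical, not part of this
+change), a node dedicated to running {transforms} could be configured in
+`elasticsearch.yml` along these lines:
+
+[source,yaml]
+--------------------------------------------------
+# Explicitly mark the node as a transform node. The explicit value is
+# needed here because node.data is disabled below, which would otherwise
+# flip the node.transform default to false.
+node.transform: true
+
+# Hypothetical hardening: keep other workloads off this node by
+# disabling its other (legacy) role settings.
+node.master: false
+node.data: false
+node.ingest: false
+--------------------------------------------------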
+
+
diff --git a/docs/reference/setup.asciidoc b/docs/reference/setup.asciidoc
index d0d26afe5feff..c00937dada732 100644
--- a/docs/reference/setup.asciidoc
+++ b/docs/reference/setup.asciidoc
@@ -51,8 +51,6 @@ include::settings/audit-settings.asciidoc[]
include::settings/ccr-settings.asciidoc[]
-include::settings/data-frames-settings.asciidoc[]
-
include::settings/ilm-settings.asciidoc[]
include::settings/license-settings.asciidoc[]
@@ -65,6 +63,8 @@ include::settings/security-settings.asciidoc[]
include::settings/sql-settings.asciidoc[]
+include::settings/transform-settings.asciidoc[]
+
include::settings/notification-settings.asciidoc[]
include::setup/important-settings.asciidoc[]
diff --git a/docs/reference/setup/important-settings/gc-logging.asciidoc b/docs/reference/setup/important-settings/gc-logging.asciidoc
index 30df94d071fab..5d5f3ac589a07 100644
--- a/docs/reference/setup/important-settings/gc-logging.asciidoc
+++ b/docs/reference/setup/important-settings/gc-logging.asciidoc
@@ -1,7 +1,49 @@
[[gc-logging]]
=== GC logging
-By default, Elasticsearch enables GC logs. These are configured in
-<> and default to the same default location as the
-Elasticsearch logs. The default configuration rotates the logs every 64 MB and
+By default, {es} enables GC logs. These are configured in
+<> and output to the same default location as
+the {es} logs. The default configuration rotates the logs every 64 MB and
can consume up to 2 GB of disk space.
+
+You can reconfigure JVM logging using the command line options described in
+https://openjdk.java.net/jeps/158[JEP 158: Unified JVM Logging]. Unless you
+change the default `jvm.options` file directly, the {es} default
+configuration is applied in addition to your own settings. To disable the
+default configuration, first disable logging by supplying the
+`-Xlog:disable` option, then supply your own command line options. This
+disables __all__ JVM logging, so be sure to review the available options
+and enable everything that you require.
+
+To see further options not contained in the original JEP, see
+https://docs.oracle.com/en/java/javase/13/docs/specs/man/java.html#enable-logging-with-the-jvm-unified-logging-framework[Enable
+Logging with the JVM Unified Logging Framework].
+
+==== Examples
+
+* Change the default GC log output location to `/opt/my-app/gc.log` by
+ creating `$ES_HOME/config/jvm.options.d/gc.options` with some sample
+ options:
++
+[source,shell]
+--------------------------------------------
+# Turn off all previous logging configurations
+-Xlog:disable
+
+# Default settings from JEP 158, but with `utctime` instead of `uptime` to match the next line
+-Xlog:all=warning:stderr:utctime,level,tags
+
+# Enable GC logging to a custom location with a variety of options
+-Xlog:gc*,gc+age=trace,safepoint:file=/opt/my-app/gc.log:utctime,pid,tags:filecount=32,filesize=64m
+--------------------------------------------
+
+* Configure an {es} <> to send GC debug logs to
+ standard error (`stderr`). This lets the container orchestrator
+ handle the output.
If using the `ES_JAVA_OPTS` environment variable, + specify: ++ +[source,sh] +-------------------------------------------- +MY_OPTS="-Xlog:disable -Xlog:all=warning:stderr:utctime,level,tags -Xlog:gc=debug:stderr:utctime" +docker run -e ES_JAVA_OPTS="$MY_OPTS" # etc +-------------------------------------------- diff --git a/docs/reference/setup/jvm-options.asciidoc b/docs/reference/setup/jvm-options.asciidoc index 3a619355edaa1..053b9cc8df851 100644 --- a/docs/reference/setup/jvm-options.asciidoc +++ b/docs/reference/setup/jvm-options.asciidoc @@ -3,7 +3,8 @@ You should rarely need to change Java Virtual Machine (JVM) options. If you do, the most likely change is setting the <>. The remainder of -this document explains in detail how to set JVM options. +this document explains in detail how to set JVM options. You can set options +either with `jvm.options` files or with the `ES_JAVA_OPTS` environment variable. The preferred method of setting or overriding JVM options is via JVM options files. When installing from the tar or zip distributions, the root `jvm.options` diff --git a/gradle.properties b/gradle.properties index f57b26f89c2fa..8c689aef44f84 100644 --- a/gradle.properties +++ b/gradle.properties @@ -1,5 +1,5 @@ org.gradle.warning.mode=none -org.gradle.daemon=true +org.gradle.parallel=true org.gradle.jvmargs=-Xmx3g -XX:+HeapDumpOnOutOfMemoryError -Xss2m options.forkOptions.memoryMaximumSize=2g diff --git a/modules/lang-expression/licenses/lucene-expressions-8.5.0-snapshot-b01d7cb.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.5.0-snapshot-b01d7cb.jar.sha1 deleted file mode 100644 index e0064436f6683..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-8.5.0-snapshot-b01d7cb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b3d969d1d63fe9500fe5308b1a0c60d84d7bfa92 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-8.5.0-snapshot-c4475920b08.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.5.0-snapshot-c4475920b08.jar.sha1 new file mode 100644 index 0000000000000..d892076210e87 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-8.5.0-snapshot-c4475920b08.jar.sha1 @@ -0,0 +1 @@ +48cb44f1dc8d3368d70581ffdbeab98ac5f5167f \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.5.0-snapshot-b01d7cb.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.5.0-snapshot-b01d7cb.jar.sha1 deleted file mode 100644 index 161fd4271c8fc..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.5.0-snapshot-b01d7cb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d81b8f0ecdc67ba74049e65702582fcb8b605cfd \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.5.0-snapshot-c4475920b08.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.5.0-snapshot-c4475920b08.jar.sha1 new file mode 100644 index 0000000000000..d90402c25e434 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.5.0-snapshot-c4475920b08.jar.sha1 @@ -0,0 +1 @@ +0748be5811dfe6725847d2e87890a990c58cc3de \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.5.0-snapshot-b01d7cb.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.5.0-snapshot-b01d7cb.jar.sha1 deleted file mode 100644 index 6d5372aea0de8..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.5.0-snapshot-b01d7cb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-13e9fd71cb3992b148728fe4884b7a6d0e26ee78 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.5.0-snapshot-c4475920b08.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.5.0-snapshot-c4475920b08.jar.sha1 new file mode 100644 index 0000000000000..a5003b5fb51a2 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.5.0-snapshot-c4475920b08.jar.sha1 @@ -0,0 +1 @@ +f693cd60ad8ca9b7d3082f7b9ee6054b9c819b48 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.5.0-snapshot-b01d7cb.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.5.0-snapshot-b01d7cb.jar.sha1 deleted file mode 100644 index 24d610869a78a..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.5.0-snapshot-b01d7cb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -58638e06cd1221b1d4de01705882922936ad69c9 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.5.0-snapshot-c4475920b08.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.5.0-snapshot-c4475920b08.jar.sha1 new file mode 100644 index 0000000000000..d82cbaef39b0b --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.5.0-snapshot-c4475920b08.jar.sha1 @@ -0,0 +1 @@ +72c34e18af81ee1d18e9927fb95690fe056cbd4f \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.5.0-snapshot-b01d7cb.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.5.0-snapshot-b01d7cb.jar.sha1 deleted file mode 100644 index 6a7a5344197c8..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.5.0-snapshot-b01d7cb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -346c87ed2ef2c6af49a434c568635a50cce64f2e \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.5.0-snapshot-c4475920b08.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.5.0-snapshot-c4475920b08.jar.sha1 new file mode 100644 index 0000000000000..2b08ae87a25d9 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.5.0-snapshot-c4475920b08.jar.sha1 @@ -0,0 +1 @@ +655438348dcad9a98b5affa76caa3d67aa4bee51 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.5.0-snapshot-b01d7cb.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.5.0-snapshot-b01d7cb.jar.sha1 deleted file mode 100644 index f2870ea28384c..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.5.0-snapshot-b01d7cb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -82ab2570c88053439912c5de5ed81cbe4337e450 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.5.0-snapshot-c4475920b08.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.5.0-snapshot-c4475920b08.jar.sha1 new file mode 100644 index 0000000000000..9a051c8320ae1 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.5.0-snapshot-c4475920b08.jar.sha1 @@ -0,0 +1 @@ +b99147dad649fce0b0423e41f90c79e0f2fba2b7 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.5.0-snapshot-b01d7cb.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.5.0-snapshot-b01d7cb.jar.sha1 deleted file mode 100644 index c090ef531a18f..0000000000000 --- 
a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.5.0-snapshot-b01d7cb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ef0703ff5dc1b704ea3e58596b91e4b08c326d4c \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.5.0-snapshot-c4475920b08.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.5.0-snapshot-c4475920b08.jar.sha1 new file mode 100644 index 0000000000000..92d6378f04c08 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.5.0-snapshot-c4475920b08.jar.sha1 @@ -0,0 +1 @@ +00ce3e23cf7aba8c1b3e777de92fd31ec1d4d814 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.5.0-snapshot-b01d7cb.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.5.0-snapshot-b01d7cb.jar.sha1 deleted file mode 100644 index e65f6726aad81..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.5.0-snapshot-b01d7cb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9cf8ce62436a3768e18c431cf3be43f5e2672b24 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.5.0-snapshot-c4475920b08.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.5.0-snapshot-c4475920b08.jar.sha1 new file mode 100644 index 0000000000000..427fa3cc39049 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.5.0-snapshot-c4475920b08.jar.sha1 @@ -0,0 +1 @@ +2b03f79d61517d8e6a8744dbd89e61ad661f6a62 \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-8.5.0-snapshot-b01d7cb.jar.sha1 b/server/licenses/lucene-analyzers-common-8.5.0-snapshot-b01d7cb.jar.sha1 deleted file mode 100644 index 99013a493370b..0000000000000 --- a/server/licenses/lucene-analyzers-common-8.5.0-snapshot-b01d7cb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4d358d392a9825c1502499d0a649730208f03777 \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-8.5.0-snapshot-c4475920b08.jar.sha1 b/server/licenses/lucene-analyzers-common-8.5.0-snapshot-c4475920b08.jar.sha1 new file mode 100644 index 0000000000000..54cf1bac15e42 --- /dev/null +++ b/server/licenses/lucene-analyzers-common-8.5.0-snapshot-c4475920b08.jar.sha1 @@ -0,0 +1 @@ +1963afb27f340df8fc304d377971424832f4ce1a \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-8.5.0-snapshot-b01d7cb.jar.sha1 b/server/licenses/lucene-backward-codecs-8.5.0-snapshot-b01d7cb.jar.sha1 deleted file mode 100644 index dab7b857d30c9..0000000000000 --- a/server/licenses/lucene-backward-codecs-8.5.0-snapshot-b01d7cb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -17a0b6597c5bbe6cc2fc6d46003cdf9bc937c593 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-8.5.0-snapshot-c4475920b08.jar.sha1 b/server/licenses/lucene-backward-codecs-8.5.0-snapshot-c4475920b08.jar.sha1 new file mode 100644 index 0000000000000..3297020fb5caa --- /dev/null +++ b/server/licenses/lucene-backward-codecs-8.5.0-snapshot-c4475920b08.jar.sha1 @@ -0,0 +1 @@ +fdff4122e8b8a2dbbc9de24be6963e7d7e33b794 \ No newline at end of file diff --git a/server/licenses/lucene-core-8.5.0-snapshot-b01d7cb.jar.sha1 b/server/licenses/lucene-core-8.5.0-snapshot-b01d7cb.jar.sha1 deleted file mode 100644 index 06a1050ba8e0e..0000000000000 --- a/server/licenses/lucene-core-8.5.0-snapshot-b01d7cb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9c54e267bfa2cd1ef904dc6e35b38bbedda4c4b1 \ No newline at end of file diff --git 
a/server/licenses/lucene-core-8.5.0-snapshot-c4475920b08.jar.sha1 b/server/licenses/lucene-core-8.5.0-snapshot-c4475920b08.jar.sha1 new file mode 100644 index 0000000000000..3fd9e819ce181 --- /dev/null +++ b/server/licenses/lucene-core-8.5.0-snapshot-c4475920b08.jar.sha1 @@ -0,0 +1 @@ +ca406661129d35008411365d2b6e747dc39378af \ No newline at end of file diff --git a/server/licenses/lucene-grouping-8.5.0-snapshot-b01d7cb.jar.sha1 b/server/licenses/lucene-grouping-8.5.0-snapshot-b01d7cb.jar.sha1 deleted file mode 100644 index dd8ec235276cc..0000000000000 --- a/server/licenses/lucene-grouping-8.5.0-snapshot-b01d7cb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a14fb545a10fcead07fbb0d801fdebce88192211 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-8.5.0-snapshot-c4475920b08.jar.sha1 b/server/licenses/lucene-grouping-8.5.0-snapshot-c4475920b08.jar.sha1 new file mode 100644 index 0000000000000..11adaa175d1af --- /dev/null +++ b/server/licenses/lucene-grouping-8.5.0-snapshot-c4475920b08.jar.sha1 @@ -0,0 +1 @@ +db053d5861406393254c28f6e46767879b504bb3 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-8.5.0-snapshot-b01d7cb.jar.sha1 b/server/licenses/lucene-highlighter-8.5.0-snapshot-b01d7cb.jar.sha1 deleted file mode 100644 index 2eca5088e51cc..0000000000000 --- a/server/licenses/lucene-highlighter-8.5.0-snapshot-b01d7cb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -25314c94f5cbcd8e360b74fb9980fc42e3641c94 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-8.5.0-snapshot-c4475920b08.jar.sha1 b/server/licenses/lucene-highlighter-8.5.0-snapshot-c4475920b08.jar.sha1 new file mode 100644 index 0000000000000..99a2bbe095ea3 --- /dev/null +++ b/server/licenses/lucene-highlighter-8.5.0-snapshot-c4475920b08.jar.sha1 @@ -0,0 +1 @@ +f5520ee7145f5d1ef02c7dc87483255d81b5bc6c \ No newline at end of file diff --git a/server/licenses/lucene-join-8.5.0-snapshot-b01d7cb.jar.sha1 b/server/licenses/lucene-join-8.5.0-snapshot-b01d7cb.jar.sha1 deleted file mode 100644 index 14dfe4cd92e6c..0000000000000 --- a/server/licenses/lucene-join-8.5.0-snapshot-b01d7cb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -efe41313b6733a0d2764052972c4e27a7ca7636f \ No newline at end of file diff --git a/server/licenses/lucene-join-8.5.0-snapshot-c4475920b08.jar.sha1 b/server/licenses/lucene-join-8.5.0-snapshot-c4475920b08.jar.sha1 new file mode 100644 index 0000000000000..e05ece509f26e --- /dev/null +++ b/server/licenses/lucene-join-8.5.0-snapshot-c4475920b08.jar.sha1 @@ -0,0 +1 @@ +2d81c0a3473cc865e7c4858890b7fbfb869bfbf8 \ No newline at end of file diff --git a/server/licenses/lucene-memory-8.5.0-snapshot-b01d7cb.jar.sha1 b/server/licenses/lucene-memory-8.5.0-snapshot-b01d7cb.jar.sha1 deleted file mode 100644 index 249e29f05ffb3..0000000000000 --- a/server/licenses/lucene-memory-8.5.0-snapshot-b01d7cb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f19fb11eb40f9d1bf77272de1cbd8c83e6463829 \ No newline at end of file diff --git a/server/licenses/lucene-memory-8.5.0-snapshot-c4475920b08.jar.sha1 b/server/licenses/lucene-memory-8.5.0-snapshot-c4475920b08.jar.sha1 new file mode 100644 index 0000000000000..2d8258caac8ab --- /dev/null +++ b/server/licenses/lucene-memory-8.5.0-snapshot-c4475920b08.jar.sha1 @@ -0,0 +1 @@ +6d009afeb485307dce111afb8bb157ebbbb0f212 \ No newline at end of file diff --git a/server/licenses/lucene-misc-8.5.0-snapshot-b01d7cb.jar.sha1 b/server/licenses/lucene-misc-8.5.0-snapshot-b01d7cb.jar.sha1 deleted file mode 100644 index 1c7eb0b52973b..0000000000000 --- 
a/server/licenses/lucene-misc-8.5.0-snapshot-b01d7cb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4295e60e1079c32dc19d1b070db3e62e04f07232 \ No newline at end of file diff --git a/server/licenses/lucene-misc-8.5.0-snapshot-c4475920b08.jar.sha1 b/server/licenses/lucene-misc-8.5.0-snapshot-c4475920b08.jar.sha1 new file mode 100644 index 0000000000000..0b228e2a8fa12 --- /dev/null +++ b/server/licenses/lucene-misc-8.5.0-snapshot-c4475920b08.jar.sha1 @@ -0,0 +1 @@ +3a62908ec9eb6e826a56e697322c4c6b6c9a8573 \ No newline at end of file diff --git a/server/licenses/lucene-queries-8.5.0-snapshot-b01d7cb.jar.sha1 b/server/licenses/lucene-queries-8.5.0-snapshot-b01d7cb.jar.sha1 deleted file mode 100644 index ef9f506ce075b..0000000000000 --- a/server/licenses/lucene-queries-8.5.0-snapshot-b01d7cb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1555f90440c10250e084d36c27e0da0b760d0296 \ No newline at end of file diff --git a/server/licenses/lucene-queries-8.5.0-snapshot-c4475920b08.jar.sha1 b/server/licenses/lucene-queries-8.5.0-snapshot-c4475920b08.jar.sha1 new file mode 100644 index 0000000000000..c159ab983010d --- /dev/null +++ b/server/licenses/lucene-queries-8.5.0-snapshot-c4475920b08.jar.sha1 @@ -0,0 +1 @@ +d71d54ed6e0cf482ce16cf4f419441d83f646827 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-8.5.0-snapshot-b01d7cb.jar.sha1 b/server/licenses/lucene-queryparser-8.5.0-snapshot-b01d7cb.jar.sha1 deleted file mode 100644 index 6335bca87b4c1..0000000000000 --- a/server/licenses/lucene-queryparser-8.5.0-snapshot-b01d7cb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4fd374ffc3ec3c82d62d0811d78b044833a8dfa1 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-8.5.0-snapshot-c4475920b08.jar.sha1 b/server/licenses/lucene-queryparser-8.5.0-snapshot-c4475920b08.jar.sha1 new file mode 100644 index 0000000000000..10be987e9c169 --- /dev/null +++ b/server/licenses/lucene-queryparser-8.5.0-snapshot-c4475920b08.jar.sha1 @@ -0,0 +1 @@ +181915a7d21b73dff16591b20cdee22648e4181f \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-8.5.0-snapshot-b01d7cb.jar.sha1 b/server/licenses/lucene-sandbox-8.5.0-snapshot-b01d7cb.jar.sha1 deleted file mode 100644 index 09138d68ae584..0000000000000 --- a/server/licenses/lucene-sandbox-8.5.0-snapshot-b01d7cb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9e47dbe97818dd6caccf03e2ead6282d5e86bd3c \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-8.5.0-snapshot-c4475920b08.jar.sha1 b/server/licenses/lucene-sandbox-8.5.0-snapshot-c4475920b08.jar.sha1 new file mode 100644 index 0000000000000..fb143f9c9c3bf --- /dev/null +++ b/server/licenses/lucene-sandbox-8.5.0-snapshot-c4475920b08.jar.sha1 @@ -0,0 +1 @@ +d3d0bb76d9f4a5368d286a934615dbca7703b3d8 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-8.5.0-snapshot-b01d7cb.jar.sha1 b/server/licenses/lucene-spatial-extras-8.5.0-snapshot-b01d7cb.jar.sha1 deleted file mode 100644 index bd957a07de4df..0000000000000 --- a/server/licenses/lucene-spatial-extras-8.5.0-snapshot-b01d7cb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bb9fa551d6d0929d5fd37c524c8c284de3f6786e \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-8.5.0-snapshot-c4475920b08.jar.sha1 b/server/licenses/lucene-spatial-extras-8.5.0-snapshot-c4475920b08.jar.sha1 new file mode 100644 index 0000000000000..fd23c6f7e0389 --- /dev/null +++ b/server/licenses/lucene-spatial-extras-8.5.0-snapshot-c4475920b08.jar.sha1 @@ -0,0 +1 @@ +95b9fd35e91a34c090ecf301d4dc29cabd198e6f \ No newline 
at end of file diff --git a/server/licenses/lucene-spatial3d-8.5.0-snapshot-b01d7cb.jar.sha1 b/server/licenses/lucene-spatial3d-8.5.0-snapshot-b01d7cb.jar.sha1 deleted file mode 100644 index 5332cf736a779..0000000000000 --- a/server/licenses/lucene-spatial3d-8.5.0-snapshot-b01d7cb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -28c935b5eb84a85ac24432c8c630fa4b47f5fd54 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-8.5.0-snapshot-c4475920b08.jar.sha1 b/server/licenses/lucene-spatial3d-8.5.0-snapshot-c4475920b08.jar.sha1 new file mode 100644 index 0000000000000..d4fc63b96d40f --- /dev/null +++ b/server/licenses/lucene-spatial3d-8.5.0-snapshot-c4475920b08.jar.sha1 @@ -0,0 +1 @@ +1c8da46c3a172830372dfc23e18e9151bb14562c \ No newline at end of file diff --git a/server/licenses/lucene-suggest-8.5.0-snapshot-b01d7cb.jar.sha1 b/server/licenses/lucene-suggest-8.5.0-snapshot-b01d7cb.jar.sha1 deleted file mode 100644 index f3f9ba3265b87..0000000000000 --- a/server/licenses/lucene-suggest-8.5.0-snapshot-b01d7cb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -25089b0f516df58e95f345504607853e7ba0a35d \ No newline at end of file diff --git a/server/licenses/lucene-suggest-8.5.0-snapshot-c4475920b08.jar.sha1 b/server/licenses/lucene-suggest-8.5.0-snapshot-c4475920b08.jar.sha1 new file mode 100644 index 0000000000000..51bc432029f38 --- /dev/null +++ b/server/licenses/lucene-suggest-8.5.0-snapshot-c4475920b08.jar.sha1 @@ -0,0 +1 @@ +b3ad5d3476ed85a529892962d057518555ccfcc9 \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java index 5438eb2122ea0..3cdc83102983a 100644 --- a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java +++ b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java @@ -126,6 +126,9 @@ public enum Option { new IndicesOptions(EnumSet.of(Option.ALLOW_NO_INDICES), EnumSet.of(WildcardStates.OPEN, WildcardStates.CLOSED)); public static final IndicesOptions STRICT_EXPAND_OPEN_FORBID_CLOSED = new IndicesOptions(EnumSet.of(Option.ALLOW_NO_INDICES, Option.FORBID_CLOSED_INDICES), EnumSet.of(WildcardStates.OPEN)); + public static final IndicesOptions STRICT_EXPAND_OPEN_HIDDEN_FORBID_CLOSED = + new IndicesOptions(EnumSet.of(Option.ALLOW_NO_INDICES, Option.FORBID_CLOSED_INDICES), + EnumSet.of(WildcardStates.OPEN, WildcardStates.HIDDEN)); public static final IndicesOptions STRICT_EXPAND_OPEN_FORBID_CLOSED_IGNORE_THROTTLED = new IndicesOptions(EnumSet.of(Option.ALLOW_NO_INDICES, Option.FORBID_CLOSED_INDICES, Option.IGNORE_THROTTLED), EnumSet.of(WildcardStates.OPEN)); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index d68d69bf24e8c..b0b19164334c9 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -680,8 +680,6 @@ public List resolve(Context context, List expressions) { return resolveEmptyOrTrivialWildcard(options, metaData); } - // TODO: Fix API to work with sets rather than lists since we need to convert to sets - // internally anyway. 
Set<String> result = innerResolve(context, expressions, options, metaData); if (result == null) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 9bac4976be212..5d9d71ba06823 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -118,7 +118,6 @@ public class MetaDataCreateIndexService { * These index patterns will be converted to hidden indices, at which point they should be removed from this list. */ private static final CharacterRunAutomaton DOT_INDICES_EXCLUSIONS = new CharacterRunAutomaton(Regex.simpleMatchToAutomaton( - ".watch-history-*", ".data-frame-notifications-*", ".transform-notifications-*" )); diff --git a/server/src/main/java/org/elasticsearch/common/settings/Setting.java b/server/src/main/java/org/elasticsearch/common/settings/Setting.java index 549f5ee755934..4e926f6c3ca11 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -439,7 +439,16 @@ private T get(Settings settings, boolean validate) { map = new HashMap<>(); while (it.hasNext()) { final Setting<?> setting = it.next(); - map.put(setting, setting.get(settings, false)); // we have to disable validation or we will stack overflow + if (setting instanceof AffixSetting) { + // Collect all possible concrete settings + AffixSetting<?> as = ((AffixSetting<?>)setting); + for (String ns : as.getNamespaces(settings)) { + Setting<?> s = as.getConcreteSettingForNamespace(ns); + map.put(s, s.get(settings, false)); + } + } else { + map.put(setting, setting.get(settings, false)); // we have to disable validation or we will stack overflow + } } } else { map = Collections.emptyMap(); diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/plain/ConstantIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/plain/ConstantIndexFieldData.java index 949c5eca5570f..98ad1d5c59283 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/plain/ConstantIndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/plain/ConstantIndexFieldData.java @@ -91,6 +91,9 @@ public Collection<Accountable> getChildResources() { @Override public SortedSetDocValues getOrdinalsValues() { + if (value == null) { + return DocValues.emptySortedSet(); + } final BytesRef term = new BytesRef(value); final SortedDocValues sortedValues = new AbstractSortedDocValues() { diff --git a/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java b/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java index abec51d0039f9..c7c8c428bc7e9 100644 --- a/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java +++ b/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java @@ -31,6 +31,7 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.BoostAttribute; import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.Query; @@ -57,6 +58,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Iterator; import java.util.List; import java.util.Set; @@ -261,7 +263,7 @@ public Query parse(Type type,
String fieldName, Object value) throws IOException && (fieldType instanceof TextFieldMapper.TextFieldType || fieldType instanceof KeywordFieldMapper.KeywordFieldType)) { return builder.newPrefixQuery(term); } else { - return builder.newTermQuery(term); + return builder.newTermQuery(term, BoostAttribute.DEFAULT_BOOST); } } @@ -521,7 +523,7 @@ private SpanQuery createSpanQuery(TokenStream in, String field, boolean isPrefix } @Override - protected Query newTermQuery(Term term) { + protected Query newTermQuery(Term term, float boost) { Supplier<Query> querySupplier; if (fuzziness != null) { querySupplier = () -> { @@ -572,7 +574,8 @@ private Query analyzeTerm(String field, TokenStream stream, boolean isPrefix) th final Term term = new Term(field, termAtt.getBytesRef()); int lastOffset = offsetAtt.endOffset(); stream.end(); - return isPrefix && lastOffset == offsetAtt.endOffset() ? newPrefixQuery(term) : newTermQuery(term); + return isPrefix && lastOffset == offsetAtt.endOffset() ? + newPrefixQuery(term) : newTermQuery(term, BoostAttribute.DEFAULT_BOOST); } private void add(BooleanQuery.Builder q, String field, List<Term> current, BooleanClause.Occur operator, boolean isPrefix) { @@ -583,11 +586,14 @@ private void add(BooleanQuery.Builder q, String field, List<Term> current, Boole if (isPrefix) { q.add(newPrefixQuery(current.get(0)), operator); } else { - q.add(newTermQuery(current.get(0)), operator); + q.add(newTermQuery(current.get(0), BoostAttribute.DEFAULT_BOOST), operator); } } else { // We don't apply prefix on synonyms - q.add(newSynonymQuery(current.toArray(new Term[current.size()])), operator); + final TermAndBoost[] termAndBoosts = current.stream() + .map(t -> new TermAndBoost(t, BoostAttribute.DEFAULT_BOOST)) + .toArray(TermAndBoost[]::new); + q.add(newSynonymQuery(termAndBoosts), operator); } } @@ -698,10 +704,13 @@ public Query next() { Term[] terms = graph.getTerms(field, start); assert terms.length > 0; if (terms.length == 1) { - queryPos = usePrefix ? newPrefixQuery(terms[0]) : newTermQuery(terms[0]); + queryPos = usePrefix ?
newPrefixQuery(terms[0]) : newTermQuery(terms[0], BoostAttribute.DEFAULT_BOOST); } else { // We don't apply prefix on synonyms - queryPos = newSynonymQuery(terms); + final TermAndBoost[] termAndBoosts = Arrays.stream(terms) + .map(t -> new TermAndBoost(t, BoostAttribute.DEFAULT_BOOST)) + .toArray(TermAndBoost[]::new); + queryPos = newSynonymQuery(termAndBoosts); } } if (queryPos != null) { diff --git a/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java b/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java index 8e33425b3981e..fc7e215a55250 100644 --- a/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java +++ b/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java @@ -180,16 +180,16 @@ private class BlendedQueryBuilder extends MatchQueryBuilder { } @Override - protected Query newSynonymQuery(Term[] terms) { + protected Query newSynonymQuery(TermAndBoost[] terms) { BytesRef[] values = new BytesRef[terms.length]; for (int i = 0; i < terms.length; i++) { - values[i] = terms[i].bytes(); + values[i] = terms[i].term.bytes(); } return blendTerms(context, values, tieBreaker, lenient, blendedFields); } @Override - protected Query newTermQuery(Term term) { + protected Query newTermQuery(Term term, float boost) { return blendTerm(context, term.bytes(), tieBreaker, lenient, blendedFields); } diff --git a/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java b/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java index a2527e040a3c1..119b1bb57dd65 100644 --- a/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java +++ b/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java @@ -28,6 +28,7 @@ import org.apache.lucene.queryparser.classic.Token; import org.apache.lucene.queryparser.classic.XQueryParser; import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BoostAttribute; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.FuzzyQuery; @@ -588,7 +589,7 @@ private Query getPossiblyAnalyzedPrefixQuery(String field, String termStr) throw if (isLastPos) { posQuery = currentFieldType.prefixQuery(plist.get(0), getMultiTermRewriteMethod(), context); } else { - posQuery = newTermQuery(new Term(field, plist.get(0))); + posQuery = newTermQuery(new Term(field, plist.get(0)), BoostAttribute.DEFAULT_BOOST); } } else if (isLastPos == false) { // build a synonym query for terms in the same position. 
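The newTermQuery and newSynonymQuery rewrites above all track a change in the Lucene 8.5 snapshot this patch upgrades to: org.apache.lucene.util.QueryBuilder now passes an explicit per-term boost to its term-query factory and hands synonym expansions to subclasses as TermAndBoost pairs instead of bare Term[] arrays, so every override has to accept the extra information even when it just forwards BoostAttribute.DEFAULT_BOOST. A minimal sketch of the new override shape, outside this patch and under the assumption of that snapshot API (the subclass name is a placeholder):

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BoostAttribute;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SynonymQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.QueryBuilder;

class BoostForwardingQueryBuilder extends QueryBuilder {

    BoostForwardingQueryBuilder(Analyzer analyzer) {
        super(analyzer);
    }

    @Override
    protected Query newTermQuery(Term term, float boost) {
        // The boost now arrives as an explicit argument; wrap only when it
        // differs from the default instead of relying on the caller.
        Query query = new TermQuery(term);
        return boost == BoostAttribute.DEFAULT_BOOST ? query : new BoostQuery(query, boost);
    }

    @Override
    protected Query newSynonymQuery(TermAndBoost[] terms) {
        // Synonym expansions carry a per-term boost (for example one produced
        // by the new "delimitedboost" token filter) instead of bare terms.
        // The caller guarantees at least one term, as the hunks above assert.
        SynonymQuery.Builder builder = new SynonymQuery.Builder(terms[0].term.field());
        for (TermAndBoost term : terms) {
            builder.addTerm(term.term, term.boost);
        }
        return builder.build();
    }
}

SimpleQueryStringQueryParser below applies the same mechanical change.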
diff --git a/server/src/main/java/org/elasticsearch/index/search/SimpleQueryStringQueryParser.java b/server/src/main/java/org/elasticsearch/index/search/SimpleQueryStringQueryParser.java index b8509ca2c112c..f4cac7e6c6d96 100644 --- a/server/src/main/java/org/elasticsearch/index/search/SimpleQueryStringQueryParser.java +++ b/server/src/main/java/org/elasticsearch/index/search/SimpleQueryStringQueryParser.java @@ -26,6 +26,7 @@ import org.apache.lucene.queryparser.simple.SimpleQueryParser; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.BoostAttribute; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.MatchNoDocsQuery; @@ -104,7 +105,7 @@ public void setDefaultOperator(BooleanClause.Occur operator) { } @Override - protected Query newTermQuery(Term term) { + protected Query newTermQuery(Term term, float boost) { MappedFieldType ft = context.fieldMapper(term.field()); if (ft == null) { return newUnmappedFieldQuery(term.field()); @@ -259,7 +260,7 @@ private Query newPossiblyAnalyzedQuery(String field, String termStr, Analyzer an if (isLastPos) { posQuery = new PrefixQuery(new Term(field, plist.get(0))); } else { - posQuery = newTermQuery(new Term(field, plist.get(0))); + posQuery = newTermQuery(new Term(field, plist.get(0)), BoostAttribute.DEFAULT_BOOST); } } else if (isLastPos == false) { // build a synonym query for terms in the same position. diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java index ab2177b51d684..da39bc0885a1e 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java @@ -351,6 +351,10 @@ public void testIndexOptionsWildcardExpansion() { assertEquals(1, results.length); assertEquals("bar", results[0]); + results = indexNameExpressionResolver.concreteIndexNames(context, "*", "-foo", "*"); + assertEquals(3, results.length); + assertThat(results, arrayContainingInAnyOrder("bar", "foobar", "foo")); + results = indexNameExpressionResolver.concreteIndexNames(context, "-*"); assertEquals(0, results.length); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java index 7d0d065f611ff..1ad07109b8d07 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java @@ -617,7 +617,6 @@ public void testValidateDotIndex() { public void testIndexNameExclusionsList() { // this test case should be removed when DOT_INDICES_EXCLUSIONS is empty List<String> excludedNames = Arrays.asList( - ".watch-history-" + randomAlphaOfLength(5).toLowerCase(Locale.ROOT), ".data-frame-notifications-" + randomAlphaOfLength(5).toLowerCase(Locale.ROOT), ".transform-notifications-" + randomAlphaOfLength(5).toLowerCase(Locale.ROOT) ); diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java index 889bfadb10ec7..56d20d30d7f87 ---
a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java @@ -830,6 +830,59 @@ public void testAffixSettingsFailOnGet() { assertEquals("[\"testelement\"]", listAffixSetting.getDefaultRaw(Settings.EMPTY)); } + public void testAffixSettingsValidatorDependencies() { + Setting<Integer> affix = Setting.affixKeySetting("abc.", "def", k -> Setting.intSetting(k, 10)); + Setting<Integer> fix0 = Setting.intSetting("abc.tuv", 20, 0); + Setting<Integer> fix1 = Setting.intSetting("abc.qrx", 20, 0, new Setting.Validator<Integer>() { + @Override + public void validate(Integer value) {} + + String toString(Map<Setting<?>, Object> s) { + return s.entrySet().stream().map(e -> e.getKey().getKey() + ":" + e.getValue().toString()).sorted() + .collect(Collectors.joining(",")); + } + + @Override + public void validate(Integer value, Map<Setting<?>, Object> settings, boolean isPresent) { + if (settings.get(fix0).equals(fix0.getDefault(Settings.EMPTY))) { + settings.remove(fix0); + } + if (settings.size() == 1) { + throw new IllegalArgumentException(toString(settings)); + } else if (settings.size() == 2) { + throw new IllegalArgumentException(toString(settings)); + } + } + + @Override + public Iterator<Setting<?>> settings() { + List<Setting<?>> a = List.of(affix, fix0); + return a.iterator(); + } + }); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> fix1.get(Settings.builder().put("abc.1.def", 11).put("abc.2.def", 12).put("abc.qrx", 11).build())); + assertThat(e.getMessage(), is("abc.1.def:11,abc.2.def:12")); + + e = expectThrows(IllegalArgumentException.class, + () -> fix1.get(Settings.builder().put("abc.3.def", 13).put("abc.qrx", 20).build())); + assertThat(e.getMessage(), is("abc.3.def:13")); + + e = expectThrows(IllegalArgumentException.class, + () -> fix1.get(Settings.builder().put("abc.4.def", 14).put("abc.qrx", 20).put("abc.tuv", 50).build())); + assertThat(e.getMessage(), is("abc.4.def:14,abc.tuv:50")); + + assertEquals( + fix1.get(Settings.builder() + .put("abc.3.def", 13).put("abc.1.def", 11).put("abc.2.def", 12).put("abc.qrx", 20) + .build()), + Integer.valueOf(20) + ); + + assertEquals(fix1.get(Settings.builder().put("abc.qrx", 30).build()), Integer.valueOf(30)); + } + public void testMinMaxInt() { Setting<Integer> integerSetting = Setting.intSetting("foo.bar", 1, 0, 10, Property.NodeScope); try { diff --git a/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java index be284e389f021..e1b253bbd636c 100644 --- a/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java @@ -193,7 +193,8 @@ public abstract class AnalysisFactoryTestCase extends ESTestCase { // LUCENE-8332 entry("concatenategraph", Void.class), // LUCENE-8936 - entry("spanishminimalstem", Void.class)); + entry("spanishminimalstem", Void.class), + entry("delimitedboost", Void.class)); static final Map<String, Class<?>> KNOWN_CHARFILTERS = Map.of( "htmlstrip", MovedToAnalysisCommon.class, diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 24ab6803e6e7c..3116792c502d6 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@
-1146,7 +1146,8 @@ protected final IndexResponse index(String index, String id, String source) { protected final RefreshResponse refresh(String... indices) { waitForRelocation(); // TODO RANDOMIZE with flush? - RefreshResponse actionGet = client().admin().indices().prepareRefresh(indices).execute().actionGet(); + RefreshResponse actionGet = client().admin().indices().prepareRefresh(indices) + .setIndicesOptions(IndicesOptions.STRICT_EXPAND_OPEN_HIDDEN_FORBID_CLOSED).execute().actionGet(); assertNoFailures(actionGet); return actionGet; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java index 74677f27b039c..1d90ef9488789 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java @@ -626,6 +626,10 @@ public boolean isDataScienceAllowed() { return allowForAllLicenses(); } + public boolean isConstantKeywordAllowed() { + return allowForAllLicenses(); + } + /** * @return true if security is available to be used with the current license type */ diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java index f2c682bd8a21b..c18cd6cb2850d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java @@ -55,6 +55,8 @@ public final class XPackField { public static final String ANALYTICS = "analytics"; /** Name constant for the enrich plugin. */ public static final String ENRICH = "enrich"; + /** Name constant for the constant-keyword plugin. 
*/ + public static final String CONSTANT_KEYWORD = "constant_keyword"; private XPackField() {} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoFeatureAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoFeatureAction.java index c900f353d55d1..9dac2212534b1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoFeatureAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoFeatureAction.java @@ -42,10 +42,11 @@ public class XPackInfoFeatureAction extends ActionType<XPackInfoFeatureResponse> public static final XPackInfoFeatureAction SPATIAL = new XPackInfoFeatureAction(XPackField.SPATIAL); public static final XPackInfoFeatureAction ANALYTICS = new XPackInfoFeatureAction(XPackField.ANALYTICS); public static final XPackInfoFeatureAction ENRICH = new XPackInfoFeatureAction(XPackField.ENRICH); + public static final XPackInfoFeatureAction CONSTANT_KEYWORD = new XPackInfoFeatureAction(XPackField.CONSTANT_KEYWORD); public static final List<XPackInfoFeatureAction> ALL = Arrays.asList( SECURITY, MONITORING, WATCHER, GRAPH, MACHINE_LEARNING, LOGSTASH, EQL, SQL, ROLLUP, INDEX_LIFECYCLE, SNAPSHOT_LIFECYCLE, CCR, - TRANSFORM, FLATTENED, VECTORS, VOTING_ONLY, FROZEN_INDICES, SPATIAL, ANALYTICS, ENRICH + TRANSFORM, FLATTENED, VECTORS, VOTING_ONLY, FROZEN_INDICES, SPATIAL, ANALYTICS, ENRICH, CONSTANT_KEYWORD ); private XPackInfoFeatureAction(String name) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/time/TimeUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/time/TimeUtils.java index e345feb59b04e..01667f8a48160 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/time/TimeUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/time/TimeUtils.java @@ -22,6 +22,10 @@ private TimeUtils() { // Do nothing } + /** + * @deprecated Please use {@link #parseTimeFieldToInstant(XContentParser, String)} instead. + */ + @Deprecated public static Date parseTimeField(XContentParser parser, String fieldName) throws IOException { if (parser.currentToken() == XContentParser.Token.VALUE_NUMBER) { return new Date(parser.longValue()); @@ -36,7 +40,7 @@ public static Instant parseTimeFieldToInstant(XContentParser parser, String fiel if (parser.currentToken() == XContentParser.Token.VALUE_NUMBER) { return Instant.ofEpochMilli(parser.longValue()); } else if (parser.currentToken() == XContentParser.Token.VALUE_STRING) { - return Instant.ofEpochMilli(dateStringToEpoch(parser.text())); + return Instant.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(parser.text())); } throw new IllegalArgumentException( "unexpected token [" + parser.currentToken() + "] for [" + fieldName + "]"); @@ -54,6 +58,7 @@ public static Instant parseTimeFieldToInstant(XContentParser parser, String fiel * @return The epoch time in milliseconds or -1 if the date cannot be * parsed. */ + @Deprecated public static long dateStringToEpoch(String date) { try { long epoch = Long.parseLong(date); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlStatsIndex.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlStatsIndex.java new file mode 100644 index 0000000000000..36cf890362428 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlStatsIndex.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V.
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.xpack.core.ml.utils.MlIndexAndAlias; +import org.elasticsearch.xpack.core.template.TemplateUtils; + +/** + * Describes the indices where ML is storing various stats about the user's jobs. + */ +public class MlStatsIndex { + + public static final String TEMPLATE_NAME = ".ml-stats"; + + private static final String MAPPINGS_VERSION_VARIABLE = "xpack.ml.version"; + + private MlStatsIndex() {} + + public static String mapping() { + return TemplateUtils.loadTemplate("/org/elasticsearch/xpack/core/ml/stats_index_mappings.json", + Version.CURRENT.toString(), MAPPINGS_VERSION_VARIABLE); + } + + public static String indexPattern() { + return TEMPLATE_NAME + "-*"; + } + + public static String writeAlias() { + return ".ml-stats-write"; + } + + /** + * Creates the first concrete .ml-stats-000001 index (if necessary) + * Creates the .ml-stats-write alias for that index. + * The listener will be notified with a boolean to indicate if the index was created because of this call, + * but unless there is a failure after this method returns the index and alias should be present. + */ + public static void createStatsIndexAndAliasIfNecessary(Client client, ClusterState state, IndexNameExpressionResolver resolver, + ActionListener<Boolean> listener) { + MlIndexAndAlias.createIndexAndAliasIfNecessary(client, state, resolver, TEMPLATE_NAME, writeAlias(), listener); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDataFrameAnalyticsStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDataFrameAnalyticsStatsAction.java index 80dbf8bed1cef..c0590797ead4d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDataFrameAnalyticsStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDataFrameAnalyticsStatsAction.java @@ -28,6 +28,7 @@ import org.elasticsearch.xpack.core.action.util.QueryPage; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsState; +import org.elasticsearch.xpack.core.ml.dataframe.stats.MemoryUsage; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.ml.utils.PhaseProgress; @@ -163,17 +164,21 @@ public static class Stats implements ToXContentObject, Writeable { */ private final List<PhaseProgress> progress; + @Nullable + private final MemoryUsage memoryUsage; + @Nullable private final DiscoveryNode node; @Nullable private final String assignmentExplanation; public Stats(String id, DataFrameAnalyticsState state, @Nullable String failureReason, List<PhaseProgress> progress, - @Nullable DiscoveryNode node, @Nullable String assignmentExplanation) { + @Nullable MemoryUsage memoryUsage, @Nullable DiscoveryNode node, @Nullable String assignmentExplanation) { this.id = Objects.requireNonNull(id); this.state = Objects.requireNonNull(state); this.failureReason = failureReason; this.progress = Objects.requireNonNull(progress); + this.memoryUsage = memoryUsage; this.node = node;
this.assignmentExplanation = assignmentExplanation; } @@ -187,6 +192,11 @@ public Stats(StreamInput in) throws IOException { } else { progress = in.readList(PhaseProgress::new); } + if (in.getVersion().onOrAfter(Version.V_7_7_0)) { + memoryUsage = in.readOptionalWriteable(MemoryUsage::new); + } else { + memoryUsage = null; + } node = in.readOptionalWriteable(DiscoveryNode::new); assignmentExplanation = in.readOptionalString(); } @@ -240,6 +250,11 @@ public List getProgress() { return progress; } + @Nullable + public MemoryUsage getMemoryUsage() { + return memoryUsage; + } + public DiscoveryNode getNode() { return node; } @@ -267,6 +282,9 @@ public XContentBuilder toUnwrappedXContent(XContentBuilder builder) throws IOExc if (progress != null) { builder.field("progress", progress); } + if (memoryUsage != null) { + builder.field("memory_usage", memoryUsage); + } if (node != null) { builder.startObject("node"); builder.field("id", node.getId()); @@ -297,6 +315,9 @@ public void writeTo(StreamOutput out) throws IOException { } else { out.writeList(progress); } + if (out.getVersion().onOrAfter(Version.V_7_7_0)) { + out.writeOptionalWriteable(memoryUsage); + } out.writeOptionalWriteable(node); out.writeOptionalString(assignmentExplanation); } @@ -329,7 +350,7 @@ private void writeProgressToLegacy(StreamOutput out) throws IOException { @Override public int hashCode() { - return Objects.hash(id, state, failureReason, progress, node, assignmentExplanation); + return Objects.hash(id, state, failureReason, progress, memoryUsage, node, assignmentExplanation); } @Override @@ -345,6 +366,7 @@ public boolean equals(Object obj) { && Objects.equals(this.state, other.state) && Objects.equals(this.failureReason, other.failureReason) && Objects.equals(this.progress, other.progress) + && Objects.equals(this.memoryUsage, other.memoryUsage) && Objects.equals(this.node, other.node) && Objects.equals(this.assignmentExplanation, other.assignmentExplanation); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/stats/MemoryUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/stats/MemoryUsage.java new file mode 100644 index 0000000000000..5131d88d95924 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/stats/MemoryUsage.java @@ -0,0 +1,117 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.dataframe.stats; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.common.time.TimeUtils; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; + +import java.io.IOException; +import java.time.Instant; +import java.util.Objects; + +public class MemoryUsage implements Writeable, ToXContentObject { + + public static final String TYPE_VALUE = "analytics_memory_usage"; + + public static final ParseField TYPE = new ParseField("type"); + public static final ParseField JOB_ID = new ParseField("job_id"); + public static final ParseField TIMESTAMP = new ParseField("timestamp"); + public static final ParseField PEAK_USAGE_BYTES = new ParseField("peak_usage_bytes"); + + public static final ConstructingObjectParser<MemoryUsage, Void> STRICT_PARSER = createParser(false); + public static final ConstructingObjectParser<MemoryUsage, Void> LENIENT_PARSER = createParser(true); + + private static ConstructingObjectParser<MemoryUsage, Void> createParser(boolean ignoreUnknownFields) { + ConstructingObjectParser<MemoryUsage, Void> parser = new ConstructingObjectParser<>(TYPE_VALUE, + ignoreUnknownFields, a -> new MemoryUsage((String) a[0], (Instant) a[1], (long) a[2])); + + parser.declareString((bucket, s) -> {}, TYPE); + parser.declareString(ConstructingObjectParser.constructorArg(), JOB_ID); + parser.declareField(ConstructingObjectParser.constructorArg(), + p -> TimeUtils.parseTimeFieldToInstant(p, TIMESTAMP.getPreferredName()), + TIMESTAMP, + ObjectParser.ValueType.VALUE); + parser.declareLong(ConstructingObjectParser.constructorArg(), PEAK_USAGE_BYTES); + return parser; + } + + private final String jobId; + private final Instant timestamp; + private final long peakUsageBytes; + + public MemoryUsage(String jobId, Instant timestamp, long peakUsageBytes) { + this.jobId = Objects.requireNonNull(jobId); + // We intend to store this timestamp in millis granularity.
Thus we're rounding here to ensure + // internal representation matches toXContent + this.timestamp = Instant.ofEpochMilli(ExceptionsHelper.requireNonNull(timestamp, TIMESTAMP).toEpochMilli()); + this.peakUsageBytes = peakUsageBytes; + } + + public MemoryUsage(StreamInput in) throws IOException { + jobId = in.readString(); + timestamp = in.readInstant(); + peakUsageBytes = in.readVLong(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(jobId); + out.writeInstant(timestamp); + out.writeVLong(peakUsageBytes); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (params.paramAsBoolean(ToXContentParams.FOR_INTERNAL_STORAGE, false)) { + builder.field(TYPE.getPreferredName(), TYPE_VALUE); + builder.field(JOB_ID.getPreferredName(), jobId); + } + builder.timeField(TIMESTAMP.getPreferredName(), TIMESTAMP.getPreferredName() + "_string", timestamp.toEpochMilli()); + builder.field(PEAK_USAGE_BYTES.getPreferredName(), peakUsageBytes); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (o == this) return true; + if (o == null || getClass() != o.getClass()) return false; + + MemoryUsage other = (MemoryUsage) o; + return Objects.equals(jobId, other.jobId) + && Objects.equals(timestamp, other.timestamp) + && peakUsageBytes == other.peakUsageBytes; + } + + @Override + public int hashCode() { + return Objects.hash(jobId, timestamp, peakUsageBytes); + } + + @Override + public String toString() { + return Strings.toString(this); + } + + public String documentId(String jobId) { + return documentIdPrefix(jobId) + timestamp.toEpochMilli(); + } + + public static String documentIdPrefix(String jobId) { + return TYPE_VALUE + "_" + jobId + "_"; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndex.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndex.java index 63859a6a171ac..306ea243120e5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndex.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndex.java @@ -5,29 +5,18 @@ */ package org.elasticsearch.xpack.core.ml.job.persistence; -import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.indices.alias.Alias; -import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; -import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; -import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.MlIndexAndAlias; import org.elasticsearch.xpack.core.template.TemplateUtils; -import java.util.Arrays; import java.util.Comparator; import java.util.function.Predicate; import java.util.regex.Pattern; -import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; -import static 
org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; - /** * Methods for handling index naming related functions */ @@ -119,61 +108,8 @@ public static String configIndexName() { */ public static void createStateIndexAndAliasIfNecessary(Client client, ClusterState state, IndexNameExpressionResolver resolver, final ActionListener finalListener) { - - if (state.getMetaData().getAliasAndIndexLookup().containsKey(jobStateIndexWriteAlias())) { - finalListener.onResponse(false); - return; - } - - final ActionListener createAliasListener = ActionListener.wrap( - concreteIndexName -> { - final IndicesAliasesRequest request = client.admin() - .indices() - .prepareAliases() - .addAlias(concreteIndexName, jobStateIndexWriteAlias()) - .request(); - executeAsyncWithOrigin(client.threadPool().getThreadContext(), - ML_ORIGIN, - request, - ActionListener.wrap( - resp -> finalListener.onResponse(resp.isAcknowledged()), - finalListener::onFailure), - client.admin().indices()::aliases); - }, - finalListener::onFailure - ); - - String[] stateIndices = resolver.concreteIndexNames(state, - IndicesOptions.lenientExpandOpen(), - jobStateIndexPattern()); - if (stateIndices.length > 0) { - String latestStateIndex = Arrays.stream(stateIndices).max(STATE_INDEX_NAME_COMPARATOR).get(); - createAliasListener.onResponse(latestStateIndex); - } else { - // The initial index name must be suitable for rollover functionality. - String initialJobStateIndex = AnomalyDetectorsIndexFields.STATE_INDEX_PREFIX + "-000001"; - CreateIndexRequest createIndexRequest = client.admin() - .indices() - .prepareCreate(initialJobStateIndex) - .addAlias(new Alias(jobStateIndexWriteAlias())) - .request(); - executeAsyncWithOrigin(client.threadPool().getThreadContext(), - ML_ORIGIN, - createIndexRequest, - ActionListener.wrap( - createIndexResponse -> finalListener.onResponse(true), - createIndexFailure -> { - // If it was created between our last check, and this request being handled, we should add the alias - // Adding an alias that already exists is idempotent. So, no need to double check if the alias exists - // as well. - if (ExceptionsHelper.unwrapCause(createIndexFailure) instanceof ResourceAlreadyExistsException) { - createAliasListener.onResponse(initialJobStateIndex); - } else { - finalListener.onFailure(createIndexFailure); - } - }), - client.admin().indices()::create); - } + MlIndexAndAlias.createIndexAndAliasIfNecessary(client, state, resolver, + AnomalyDetectorsIndexFields.STATE_INDEX_PREFIX, AnomalyDetectorsIndex.jobStateIndexWriteAlias(), finalListener); } public static String resultsMapping() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAlias.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAlias.java new file mode 100644 index 0000000000000..79b9987d35193 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAlias.java @@ -0,0 +1,120 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.utils; + +import org.elasticsearch.ResourceAlreadyExistsException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; + +import java.util.Arrays; +import java.util.Comparator; +import java.util.function.Predicate; +import java.util.regex.Pattern; + +import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; + +/** + * Utils to create an ML index with an alias ready for rollover with a 6-digit suffix + */ +public final class MlIndexAndAlias { + + // Visible for testing + static final Comparator<String> INDEX_NAME_COMPARATOR = new Comparator<>() { + + private final Predicate<String> HAS_SIX_DIGIT_SUFFIX = Pattern.compile("\\d{6}").asMatchPredicate(); + + @Override + public int compare(String index1, String index2) { + String[] index1Parts = index1.split("-"); + String index1Suffix = index1Parts[index1Parts.length - 1]; + boolean index1HasSixDigitsSuffix = HAS_SIX_DIGIT_SUFFIX.test(index1Suffix); + String[] index2Parts = index2.split("-"); + String index2Suffix = index2Parts[index2Parts.length - 1]; + boolean index2HasSixDigitsSuffix = HAS_SIX_DIGIT_SUFFIX.test(index2Suffix); + if (index1HasSixDigitsSuffix && index2HasSixDigitsSuffix) { + return index1Suffix.compareTo(index2Suffix); + } else if (index1HasSixDigitsSuffix != index2HasSixDigitsSuffix) { + return Boolean.compare(index1HasSixDigitsSuffix, index2HasSixDigitsSuffix); + } else { + return index1.compareTo(index2); + } + } + }; + + private MlIndexAndAlias() {} + + /** + * Creates the first index with a name of the given {@code indexPatternPrefix} followed by "-000001", if the index is missing. + * Adds an {@code alias} to that index if it was created, + * or to the index with the highest suffix if the index did not have to be created. + * The listener is notified with a {@code boolean} that informs whether the index or the alias was created.
+ */ + public static void createIndexAndAliasIfNecessary(Client client, ClusterState clusterState, IndexNameExpressionResolver resolver, + String indexPatternPrefix, String alias, ActionListener<Boolean> listener) { + if (clusterState.getMetaData().getAliasAndIndexLookup().containsKey(alias)) { + listener.onResponse(false); + return; + } + + final ActionListener<String> createAliasListener = ActionListener.wrap( + concreteIndexName -> { + final IndicesAliasesRequest request = client.admin() + .indices() + .prepareAliases() + .addAlias(concreteIndexName, alias) + .request(); + executeAsyncWithOrigin(client.threadPool().getThreadContext(), + ML_ORIGIN, + request, + ActionListener.wrap( + resp -> listener.onResponse(resp.isAcknowledged()), + listener::onFailure), + client.admin().indices()::aliases); + }, + listener::onFailure + ); + + String[] stateIndices = resolver.concreteIndexNames(clusterState, + IndicesOptions.lenientExpandOpen(), indexPatternPrefix + "*"); + if (stateIndices.length > 0) { + String latestStateIndex = Arrays.stream(stateIndices).max(INDEX_NAME_COMPARATOR).get(); + createAliasListener.onResponse(latestStateIndex); + } else { + // The initial index name must be suitable for rollover functionality. + String initialJobStateIndex = indexPatternPrefix + "-000001"; + CreateIndexRequest createIndexRequest = client.admin() + .indices() + .prepareCreate(initialJobStateIndex) + .addAlias(new Alias(alias)) + .request(); + executeAsyncWithOrigin(client.threadPool().getThreadContext(), + ML_ORIGIN, + createIndexRequest, + ActionListener.wrap( + createIndexResponse -> listener.onResponse(true), + createIndexFailure -> { + // If it was created between our last check, and this request being handled, we should add the alias + // Adding an alias that already exists is idempotent. So, no need to double check if the alias exists + // as well.
+ if (ExceptionsHelper.unwrapCause(createIndexFailure) instanceof ResourceAlreadyExistsException) { + createAliasListener.onResponse(initialJobStateIndex); + } else { + listener.onFailure(createIndexFailure); + } + }), + client.admin().indices()::create); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SourceConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SourceConfig.java index b59a7912cf701..b3478e89520c0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SourceConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SourceConfig.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.license.RemoteClusterLicenseChecker; import org.elasticsearch.xpack.core.transform.utils.ExceptionsHelper; import java.io.IOException; @@ -98,6 +99,10 @@ public boolean isValid() { return queryConfig.isValid(); } + public boolean requiresRemoteCluster() { + return Arrays.stream(index).anyMatch(RemoteClusterLicenseChecker::isRemoteIndex); + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeStringArray(index); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformTaskParams.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformTaskParams.java index ea5d97e049e08..6b7965b74df44 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformTaskParams.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformTaskParams.java @@ -25,29 +25,35 @@ public class TransformTaskParams extends AbstractDiffable<TransformTaskParams> i public static final String NAME = TransformField.TASK_NAME; public static final ParseField FREQUENCY = TransformField.FREQUENCY; + public static final ParseField REQUIRES_REMOTE = new ParseField("requires_remote"); private final String transformId; private final Version version; private final TimeValue frequency; + private final Boolean requiresRemote; public static final ConstructingObjectParser<TransformTaskParams, Void> PARSER = new ConstructingObjectParser<>(NAME, true, - a -> new TransformTaskParams((String) a[0], (String) a[1], (String) a[2])); + a -> new TransformTaskParams((String) a[0], (String) a[1], (String) a[2], (Boolean) a[3])); static { PARSER.declareString(ConstructingObjectParser.constructorArg(), TransformField.ID); PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), TransformField.VERSION); PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), FREQUENCY); + PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), REQUIRES_REMOTE); } - private TransformTaskParams(String transformId, String version, String frequency) { + private TransformTaskParams(String transformId, String version, String frequency, Boolean remote) { this(transformId, version == null ? null : Version.fromString(version), - frequency == null ? null : TimeValue.parseTimeValue(frequency, FREQUENCY.getPreferredName())); + frequency == null ? null : TimeValue.parseTimeValue(frequency, FREQUENCY.getPreferredName()), + remote == null ?
false : remote.booleanValue() + ); } - public TransformTaskParams(String transformId, Version version, TimeValue frequency) { + public TransformTaskParams(String transformId, Version version, TimeValue frequency, boolean remote) { this.transformId = transformId; this.version = version == null ? Version.V_7_2_0 : version; this.frequency = frequency; + this.requiresRemote = remote; } public TransformTaskParams(StreamInput in) throws IOException { @@ -62,6 +68,11 @@ public TransformTaskParams(StreamInput in) throws IOException { } else { this.frequency = null; } + if (in.getVersion().onOrAfter(Version.V_8_0_0)) { // todo: V_7_7_0 + this.requiresRemote = in.readBoolean(); + } else { + this.requiresRemote = false; + } } @Override @@ -83,6 +94,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_7_3_0)) { out.writeOptionalTimeValue(frequency); } + if (out.getVersion().onOrAfter(Version.V_8_0_0)) { // todo: V_7_7_0 + out.writeBoolean(requiresRemote); + } } @Override @@ -93,6 +107,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (frequency != null) { builder.field(FREQUENCY.getPreferredName(), frequency.getStringRep()); } + builder.field(REQUIRES_REMOTE.getPreferredName(), requiresRemote); builder.endObject(); return builder; } @@ -109,6 +124,10 @@ public TimeValue getFrequency() { return frequency; } + public boolean requiresRemote() { + return requiresRemote; + } + public static TransformTaskParams fromXContent(XContentParser parser) throws IOException { return PARSER.parse(parser, null); } @@ -127,11 +146,12 @@ public boolean equals(Object other) { return Objects.equals(this.transformId, that.transformId) && Objects.equals(this.version, that.version) - && Objects.equals(this.frequency, that.frequency); + && Objects.equals(this.frequency, that.frequency) + && this.requiresRemote == that.requiresRemote; } @Override public int hashCode() { - return Objects.hash(transformId, version, frequency); + return Objects.hash(transformId, version, frequency, requiresRemote); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherIndexTemplateRegistryField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherIndexTemplateRegistryField.java index 9eaf0237ffdc1..2c3442956d113 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherIndexTemplateRegistryField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherIndexTemplateRegistryField.java @@ -15,8 +15,9 @@ public final class WatcherIndexTemplateRegistryField { // version 8: fix slack attachment property not to be dynamic, causing field type issues // version 9: add a user field defining which user executed the watch // version 10: add support for foreach path in actions + // version 11: watch history indices are hidden // Note: if you change this, also inform the kibana team around the watcher-ui - public static final int INDEX_TEMPLATE_VERSION = 10; + public static final int INDEX_TEMPLATE_VERSION = 11; public static final String HISTORY_TEMPLATE_NAME = ".watch-history-" + INDEX_TEMPLATE_VERSION; public static final String HISTORY_TEMPLATE_NAME_NO_ILM = ".watch-history-no-ilm-" + INDEX_TEMPLATE_VERSION; public static final String TRIGGERED_TEMPLATE_NAME = ".triggered_watches"; diff --git a/x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/core/ml/stats_index_mappings.json 
b/x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/core/ml/stats_index_mappings.json new file mode 100644 index 0000000000000..5a1215057bb22 --- /dev/null +++ b/x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/core/ml/stats_index_mappings.json @@ -0,0 +1,21 @@ +{ + "_doc": { + "_meta": { + "version" : "${xpack.ml.version}" + }, + "properties" : { + "type" : { + "type" : "keyword" + }, + "job_id" : { + "type" : "keyword" + }, + "timestamp" : { + "type" : "date" + }, + "peak_usage_bytes" : { + "type" : "long" + } + } + } +} diff --git a/x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/core/ml/stats_index_template.json b/x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/core/ml/stats_index_template.json new file mode 100644 index 0000000000000..1c694d9d1a7e5 --- /dev/null +++ b/x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/core/ml/stats_index_template.json @@ -0,0 +1,15 @@ +{ + "order" : 0, + "version" : ${xpack.ml.version.id}, + "index_patterns" : [ + ".ml-stats-*" + ], + "settings": { + "index" : { + "number_of_shards" : "1", + "auto_expand_replicas" : "0-1", + "hidden": true + } + }, + "mappings" : ${xpack.ml.stats.mappings} +} diff --git a/x-pack/plugin/core/src/main/resources/watch-history-no-ilm.json b/x-pack/plugin/core/src/main/resources/watch-history-no-ilm.json index 5b3186a9a6c12..7b5910508392d 100644 --- a/x-pack/plugin/core/src/main/resources/watch-history-no-ilm.json +++ b/x-pack/plugin/core/src/main/resources/watch-history-no-ilm.json @@ -5,6 +5,7 @@ "index.number_of_shards": 1, "index.number_of_replicas": 0, "index.auto_expand_replicas": "0-1", + "index.hidden": true, "index.format": 6 }, "mappings": { diff --git a/x-pack/plugin/core/src/main/resources/watch-history.json b/x-pack/plugin/core/src/main/resources/watch-history.json index d35ddd9afd24c..109ca95d75190 100644 --- a/x-pack/plugin/core/src/main/resources/watch-history.json +++ b/x-pack/plugin/core/src/main/resources/watch-history.json @@ -6,6 +6,7 @@ "index.number_of_replicas": 0, "index.auto_expand_replicas": "0-1", "index.lifecycle.name": "watch-history-ilm-policy", + "index.hidden": true, "index.format": 6 }, "mappings": { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/test/http/MockWebServer.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/test/http/MockWebServer.java index fc3e37c87db2d..5da4eaa849245 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/test/http/MockWebServer.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/test/http/MockWebServer.java @@ -268,10 +268,12 @@ public void close() { logger.debug("[{}:{}] Counting down all latches before terminating executor", getHostName(), getPort()); latches.forEach(CountDownLatch::countDown); - if (server.getExecutor() instanceof ExecutorService) { - terminate((ExecutorService) server.getExecutor()); + if (server != null) { + if (server.getExecutor() instanceof ExecutorService) { + terminate((ExecutorService) server.getExecutor()); + } + server.stop(0); } - server.stop(0); } /** diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDataFrameAnalyticsStatsActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDataFrameAnalyticsStatsActionResponseTests.java index 84c77a5fb38b1..f5dab116b38e1 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDataFrameAnalyticsStatsActionResponseTests.java +++ 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDataFrameAnalyticsStatsActionResponseTests.java @@ -11,6 +11,8 @@ import org.elasticsearch.xpack.core.ml.action.GetDataFrameAnalyticsStatsAction.Response; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfigTests; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsState; +import org.elasticsearch.xpack.core.ml.dataframe.stats.MemoryUsage; +import org.elasticsearch.xpack.core.ml.dataframe.stats.MemoryUsageTests; import org.elasticsearch.xpack.core.ml.utils.PhaseProgress; import java.util.ArrayList; @@ -27,8 +29,9 @@ public static Response randomResponse(int listSize) { List<PhaseProgress> progress = new ArrayList<>(progressSize); IntStream.of(progressSize).forEach(progressIndex -> progress.add( new PhaseProgress(randomAlphaOfLength(10), randomIntBetween(0, 100)))); + MemoryUsage memoryUsage = randomBoolean() ? null : MemoryUsageTests.createRandom(); Response.Stats stats = new Response.Stats(DataFrameAnalyticsConfigTests.randomValidId(), - randomFrom(DataFrameAnalyticsState.values()), failureReason, progress, null, randomAlphaOfLength(20)); + randomFrom(DataFrameAnalyticsState.values()), failureReason, progress, memoryUsage, null, randomAlphaOfLength(20)); analytics.add(stats); } return new Response(new QueryPage<>(analytics, analytics.size(), GetDataFrameAnalyticsAction.Response.RESULTS_FIELD)); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/stats/MemoryUsageTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/stats/MemoryUsageTests.java new file mode 100644 index 0000000000000..44ce79b98c076 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/stats/MemoryUsageTests.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.dataframe.stats; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; +import org.junit.Before; + +import java.io.IOException; +import java.time.Instant; +import java.util.Collections; + +public class MemoryUsageTests extends AbstractSerializingTestCase<MemoryUsage> { + + private boolean lenient; + + @Before + public void chooseStrictOrLenient() { + lenient = randomBoolean(); + } + + @Override + protected boolean supportsUnknownFields() { + return lenient; + } + + @Override + protected MemoryUsage doParseInstance(XContentParser parser) throws IOException { + return lenient ?
MemoryUsage.LENIENT_PARSER.parse(parser, null) : MemoryUsage.STRICT_PARSER.parse(parser, null); + } + + @Override + protected ToXContent.Params getToXContentParams() { + return new ToXContent.MapParams(Collections.singletonMap(ToXContentParams.FOR_INTERNAL_STORAGE, "true")); + } + + public static MemoryUsage createRandom() { + return new MemoryUsage(randomAlphaOfLength(10), Instant.now(), randomNonNegativeLong()); + } + + @Override + protected Writeable.Reader<MemoryUsage> instanceReader() { + return MemoryUsage::new; + } + + @Override + protected MemoryUsage createTestInstance() { + return createRandom(); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndexTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAliasTests.java similarity index 75% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndexTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAliasTests.java index ce5b038728651..80a55394a4003 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndexTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAliasTests.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.ml.job.persistence; +package org.elasticsearch.xpack.core.ml.utils; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -56,11 +56,12 @@ import static org.mockito.Mockito.verifyNoMoreInteractions; import static org.mockito.Mockito.when; -public class AnomalyDetectorsIndexTests extends ESTestCase { +public class MlIndexAndAliasTests extends ESTestCase { - private static final String LEGACY_ML_STATE = ".ml-state"; - private static final String INITIAL_ML_STATE = ".ml-state-000001"; - private static final String ML_STATE_WRITE_ALIAS = ".ml-state-write"; + private static final String TEST_INDEX_PREFIX = "test"; + private static final String TEST_INDEX_ALIAS = "test-alias"; + private static final String LEGACY_INDEX_WITHOUT_SUFFIX = TEST_INDEX_PREFIX; + private static final String FIRST_CONCRETE_INDEX = "test-000001"; private ThreadPool threadPool; private IndicesAdminClient indicesAdminClient; @@ -77,9 +78,9 @@ public void setUpMocks() { when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); indicesAdminClient = mock(IndicesAdminClient.class); - when(indicesAdminClient.prepareCreate(INITIAL_ML_STATE)) - .thenReturn(new CreateIndexRequestBuilder(client, CreateIndexAction.INSTANCE, INITIAL_ML_STATE)); - doAnswer(withResponse(new CreateIndexResponse(true, true, INITIAL_ML_STATE))).when(indicesAdminClient).create(any(), any()); + when(indicesAdminClient.prepareCreate(FIRST_CONCRETE_INDEX)) + .thenReturn(new CreateIndexRequestBuilder(client, CreateIndexAction.INSTANCE, FIRST_CONCRETE_INDEX)); + doAnswer(withResponse(new CreateIndexResponse(true, true, FIRST_CONCRETE_INDEX))).when(indicesAdminClient).create(any(), any()); when(indicesAdminClient.prepareAliases()).thenReturn(new IndicesAliasesRequestBuilder(client, IndicesAliasesAction.INSTANCE)); doAnswer(withResponse(new AcknowledgedResponse(true))).when(indicesAdminClient).aliases(any(), any()); @@ -103,31 +104,31 @@ public void
verifyNoMoreInteractionsWithMocks() { public void testCreateStateIndexAndAliasIfNecessary_CleanState() { ClusterState clusterState = createClusterState(Collections.emptyMap()); - AnomalyDetectorsIndex.createStateIndexAndAliasIfNecessary(client, clusterState, new IndexNameExpressionResolver(), finalListener); + createIndexAndAliasIfNecessary(clusterState); InOrder inOrder = inOrder(indicesAdminClient, finalListener); - inOrder.verify(indicesAdminClient).prepareCreate(INITIAL_ML_STATE); + inOrder.verify(indicesAdminClient).prepareCreate(FIRST_CONCRETE_INDEX); inOrder.verify(indicesAdminClient).create(createRequestCaptor.capture(), any()); inOrder.verify(finalListener).onResponse(true); CreateIndexRequest createRequest = createRequestCaptor.getValue(); - assertThat(createRequest.index(), equalTo(INITIAL_ML_STATE)); - assertThat(createRequest.aliases(), equalTo(Collections.singleton(new Alias(ML_STATE_WRITE_ALIAS)))); + assertThat(createRequest.index(), equalTo(FIRST_CONCRETE_INDEX)); + assertThat(createRequest.aliases(), equalTo(Collections.singleton(new Alias(TEST_INDEX_ALIAS)))); } private void assertNoClientInteractionsWhenWriteAliasAlreadyExists(String indexName) { ClusterState clusterState = createClusterState(Collections.singletonMap(indexName, createIndexMetaDataWithAlias(indexName))); - AnomalyDetectorsIndex.createStateIndexAndAliasIfNecessary(client, clusterState, new IndexNameExpressionResolver(), finalListener); + createIndexAndAliasIfNecessary(clusterState); verify(finalListener).onResponse(false); } - public void testCreateStateIndexAndAliasIfNecessary_WriteAliasAlreadyExistsAndPointsAtLegacyStateIndex() { - assertNoClientInteractionsWhenWriteAliasAlreadyExists(LEGACY_ML_STATE); + public void testCreateStateIndexAndAliasIfNecessary_WriteAliasAlreadyExistsAndPointsAtLegacyIndex() { + assertNoClientInteractionsWhenWriteAliasAlreadyExists(LEGACY_INDEX_WITHOUT_SUFFIX); } public void testCreateStateIndexAndAliasIfNecessary_WriteAliasAlreadyExistsAndPointsAtInitialStateIndex() { - assertNoClientInteractionsWhenWriteAliasAlreadyExists(INITIAL_ML_STATE); + assertNoClientInteractionsWhenWriteAliasAlreadyExists(FIRST_CONCRETE_INDEX); } public void testCreateStateIndexAndAliasIfNecessary_WriteAliasAlreadyExistsAndPointsAtSubsequentStateIndex() { @@ -141,8 +142,8 @@ public void testCreateStateIndexAndAliasIfNecessary_WriteAliasAlreadyExistsAndPo private void assertMlStateWriteAliasAddedToMostRecentMlStateIndex(List<String> existingIndexNames, String expectedWriteIndexName) { ClusterState clusterState = createClusterState( - existingIndexNames.stream().collect(toMap(Function.identity(), AnomalyDetectorsIndexTests::createIndexMetaData))); - AnomalyDetectorsIndex.createStateIndexAndAliasIfNecessary(client, clusterState, new IndexNameExpressionResolver(), finalListener); + existingIndexNames.stream().collect(toMap(Function.identity(), MlIndexAndAliasTests::createIndexMetaData))); + createIndexAndAliasIfNecessary(clusterState); InOrder inOrder = inOrder(indicesAdminClient, finalListener); inOrder.verify(indicesAdminClient).prepareAliases(); @@ -152,54 +153,59 @@ private void assertMlStateWriteAliasAddedToMostRecentMlStateIndex(List<String> e IndicesAliasesRequest indicesAliasesRequest = aliasesRequestCaptor.getValue(); assertThat( indicesAliasesRequest.getAliasActions(), - contains(AliasActions.add().alias(ML_STATE_WRITE_ALIAS).index(expectedWriteIndexName))); + contains(AliasActions.add().alias(TEST_INDEX_ALIAS).index(expectedWriteIndexName))); } - public void
testCreateStateIndexAndAliasIfNecessary_WriteAliasDoesNotExistButLegacyStateIndexExists() { + public void testCreateStateIndexAndAliasIfNecessary_WriteAliasDoesNotExistButLegacyIndexExists() { assertMlStateWriteAliasAddedToMostRecentMlStateIndex( - Arrays.asList(LEGACY_ML_STATE), LEGACY_ML_STATE); + Arrays.asList(LEGACY_INDEX_WITHOUT_SUFFIX), LEGACY_INDEX_WITHOUT_SUFFIX); } public void testCreateStateIndexAndAliasIfNecessary_WriteAliasDoesNotExistButInitialStateIndexExists() { assertMlStateWriteAliasAddedToMostRecentMlStateIndex( - Arrays.asList(INITIAL_ML_STATE), INITIAL_ML_STATE); + Arrays.asList(FIRST_CONCRETE_INDEX), FIRST_CONCRETE_INDEX); } public void testCreateStateIndexAndAliasIfNecessary_WriteAliasDoesNotExistButSubsequentStateIndicesExist() { assertMlStateWriteAliasAddedToMostRecentMlStateIndex( - Arrays.asList(".ml-state-000003", ".ml-state-000040", ".ml-state-000500"), ".ml-state-000500"); + Arrays.asList("test-000003", "test-000040", "test-000500"), "test-000500"); } - public void testCreateStateIndexAndAliasIfNecessary_WriteAliasDoesNotExistButBothLegacyAndNewStateIndicesDoExist() { + public void testCreateStateIndexAndAliasIfNecessary_WriteAliasDoesNotExistButBothLegacyAndNewIndicesExist() { assertMlStateWriteAliasAddedToMostRecentMlStateIndex( - Arrays.asList(LEGACY_ML_STATE, ".ml-state-000003", ".ml-state-000040", ".ml-state-000500"), ".ml-state-000500"); + Arrays.asList(LEGACY_INDEX_WITHOUT_SUFFIX, "test-000003", "test-000040", "test-000500"), "test-000500"); } - public void testStateIndexNameComparator() { - Comparator<String> comparator = AnomalyDetectorsIndex.STATE_INDEX_NAME_COMPARATOR; + public void testIndexNameComparator() { + Comparator<String> comparator = MlIndexAndAlias.INDEX_NAME_COMPARATOR; assertThat( - Stream.of(".ml-state-000001").max(comparator).get(), - equalTo(".ml-state-000001")); + Stream.of("test-000001").max(comparator).get(), + equalTo("test-000001")); assertThat( - Stream.of(".ml-state-000002", ".ml-state-000001").max(comparator).get(), - equalTo(".ml-state-000002")); + Stream.of("test-000002", "test-000001").max(comparator).get(), + equalTo("test-000002")); assertThat( - Stream.of(".ml-state-000003", ".ml-state-000040", ".ml-state-000500").max(comparator).get(), - equalTo(".ml-state-000500")); + Stream.of("test-000003", "test-000040", "test-000500").max(comparator).get(), + equalTo("test-000500")); assertThat( - Stream.of(".ml-state-000042", ".ml-state-000049", ".ml-state-000038").max(comparator).get(), - equalTo(".ml-state-000049")); + Stream.of("test-000042", "test-000049", "test-000038").max(comparator).get(), + equalTo("test-000049")); assertThat( - Stream.of(".ml-state", ".ml-state-000003", ".ml-state-000040", ".ml-state-000500").max(comparator).get(), - equalTo(".ml-state-000500")); + Stream.of("test", "test-000003", "test-000040", "test-000500").max(comparator).get(), + equalTo("test-000500")); assertThat( - Stream.of(".reindexed-6-ml-state", ".ml-state-000042").max(comparator).get(), - equalTo(".ml-state-000042")); + Stream.of(".reindexed-6-test", "test-000042").max(comparator).get(), + equalTo("test-000042")); assertThat( Stream.of(".a-000002", ".b-000001").max(comparator).get(), equalTo(".a-000002")); } + private void createIndexAndAliasIfNecessary(ClusterState clusterState) { + MlIndexAndAlias.createIndexAndAliasIfNecessary(client, clusterState, new IndexNameExpressionResolver(), + TEST_INDEX_PREFIX, TEST_INDEX_ALIAS, finalListener); + } + @SuppressWarnings("unchecked") private static <Response> Answer<Response> withResponse(Response response) { return invocationOnMock ->
{ @@ -234,7 +240,7 @@ private static IndexMetaData createIndexMetaData(String indexName, boolean withA IndexMetaData.Builder builder = IndexMetaData.builder(indexName) .settings(settings); if (withAlias) { - builder.putAlias(AliasMetaData.builder(ML_STATE_WRITE_ALIAS).build()); + builder.putAlias(AliasMetaData.builder(TEST_INDEX_ALIAS).build()); } return builder.build(); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/SourceConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/SourceConfigTests.java index 972bb9cc83138..629a9485a190f 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/SourceConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/SourceConfigTests.java @@ -59,4 +59,21 @@ protected Reader<SourceConfig> instanceReader() { return SourceConfig::new; } + public void testRequiresRemoteCluster() { + assertFalse(new SourceConfig(new String [] {"index1", "index2", "index3"}, + QueryConfigTests.randomQueryConfig()).requiresRemoteCluster()); + + assertTrue(new SourceConfig(new String [] {"index1", "remote2:index2", "index3"}, + QueryConfigTests.randomQueryConfig()).requiresRemoteCluster()); + + assertTrue(new SourceConfig(new String [] {"index1", "index2", "remote3:index3"}, + QueryConfigTests.randomQueryConfig()).requiresRemoteCluster()); + + assertTrue(new SourceConfig(new String [] {"index1", "remote2:index2", "remote3:index3"}, + QueryConfigTests.randomQueryConfig()).requiresRemoteCluster()); + + assertTrue(new SourceConfig(new String [] {"remote1:index1"}, + QueryConfigTests.randomQueryConfig()).requiresRemoteCluster()); + } + } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformTests.java index 3f0ecdc04f16d..a5fb99c70bd00 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformTests.java @@ -27,7 +27,7 @@ protected TransformTaskParams doParseInstance(XContentParser parser) throws IOEx @Override protected TransformTaskParams createTestInstance() { return new TransformTaskParams(randomAlphaOfLength(10), randomBoolean() ? null : Version.CURRENT, - randomBoolean() ? null : TimeValue.timeValueMillis(randomIntBetween(1_000, 3_600_000))); + randomBoolean() ?
null : TimeValue.timeValueMillis(randomIntBetween(1_000, 3_600_000)), randomBoolean()); } @Override diff --git a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/CommonEqlRestTestCase.java b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/CommonEqlRestTestCase.java index 628972c4d20dc..be89d3ee9c27e 100644 --- a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/CommonEqlRestTestCase.java +++ b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/CommonEqlRestTestCase.java @@ -36,31 +36,31 @@ static class SearchTestConfiguration { } public static final String defaultValidationIndexName = "eql_search_validation_test"; - private static final String validRule = "process where user = 'SYSTEM'"; + private static final String validQuery = "process where user = 'SYSTEM'"; public static final ArrayList<SearchTestConfiguration> searchValidationTests; static { searchValidationTests = new ArrayList<>(); searchValidationTests.add(new SearchTestConfiguration(null, 400, "request body or source parameter is required")); - searchValidationTests.add(new SearchTestConfiguration("{}", 400, "rule is null or empty")); - searchValidationTests.add(new SearchTestConfiguration("{\"rule\": \"\"}", 400, "rule is null or empty")); - searchValidationTests.add(new SearchTestConfiguration("{\"rule\": \"" + validRule + "\", \"timestamp_field\": \"\"}", + searchValidationTests.add(new SearchTestConfiguration("{}", 400, "query is null or empty")); + searchValidationTests.add(new SearchTestConfiguration("{\"query\": \"\"}", 400, "query is null or empty")); + searchValidationTests.add(new SearchTestConfiguration("{\"query\": \"" + validQuery + "\", \"timestamp_field\": \"\"}", 400, "timestamp field is null or empty")); - searchValidationTests.add(new SearchTestConfiguration("{\"rule\": \"" + validRule + "\", \"event_type_field\": \"\"}", + searchValidationTests.add(new SearchTestConfiguration("{\"query\": \"" + validQuery + "\", \"event_type_field\": \"\"}", 400, "event type field is null or empty")); - searchValidationTests.add(new SearchTestConfiguration("{\"rule\": \"" + validRule + "\", \"implicit_join_key_field\": \"\"}", + searchValidationTests.add(new SearchTestConfiguration("{\"query\": \"" + validQuery + "\", \"implicit_join_key_field\": \"\"}", 400, "implicit join key field is null or empty")); - searchValidationTests.add(new SearchTestConfiguration("{\"rule\": \"" + validRule + "\", \"size\": 0}", + searchValidationTests.add(new SearchTestConfiguration("{\"query\": \"" + validQuery + "\", \"size\": 0}", 400, "size must be greater than 0")); - searchValidationTests.add(new SearchTestConfiguration("{\"rule\": \"" + validRule + "\", \"size\": -1}", + searchValidationTests.add(new SearchTestConfiguration("{\"query\": \"" + validQuery + "\", \"size\": -1}", 400, "size must be greater than 0")); - searchValidationTests.add(new SearchTestConfiguration("{\"rule\": \"" + validRule + "\", \"search_after\": null}", + searchValidationTests.add(new SearchTestConfiguration("{\"query\": \"" + validQuery + "\", \"search_after\": null}", 400, "search_after doesn't support values of type: VALUE_NULL")); - searchValidationTests.add(new SearchTestConfiguration("{\"rule\": \"" + validRule + "\", \"search_after\": []}", + searchValidationTests.add(new SearchTestConfiguration("{\"query\": \"" + validQuery + "\", \"search_after\": []}", 400, "must contains at least one value")); - searchValidationTests.add(new SearchTestConfiguration("{\"rule\": \"" + validRule + "\", \"query\": null}", -
400, "query doesn't support values of type: VALUE_NULL")); - searchValidationTests.add(new SearchTestConfiguration("{\"rule\": \"" + validRule + "\", \"query\": {}}", + searchValidationTests.add(new SearchTestConfiguration("{\"query\": \"" + validQuery + "\", \"filter\": null}", + 400, "filter doesn't support values of type: VALUE_NULL")); + searchValidationTests.add(new SearchTestConfiguration("{\"query\": \"" + validQuery + "\", \"filter\": {}}", 400, "query malformed, empty clause found")); } diff --git a/x-pack/plugin/eql/qa/rest/src/test/resources/rest-api-spec/test/eql/10_basic.yml b/x-pack/plugin/eql/qa/rest/src/test/resources/rest-api-spec/test/eql/10_basic.yml index 633b6225780a5..d4ef1aef83eae 100644 --- a/x-pack/plugin/eql/qa/rest/src/test/resources/rest-api-spec/test/eql/10_basic.yml +++ b/x-pack/plugin/eql/qa/rest/src/test/resources/rest-api-spec/test/eql/10_basic.yml @@ -17,7 +17,7 @@ setup: eql.search: index: eql_test body: - rule: "process where user = 'SYSTEM'" + query: "process where user = 'SYSTEM'" - match: {timed_out: false} - match: {hits.total.value: 1} diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchRequest.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchRequest.java index 2d5aa5f8c3bd2..89cacf44e719f 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchRequest.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchRequest.java @@ -37,29 +37,29 @@ public class EqlSearchRequest extends ActionRequest implements IndicesRequest.Re private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, false); - private QueryBuilder query = null; + private QueryBuilder filter = null; private String timestampField = FIELD_TIMESTAMP; private String eventTypeField = FIELD_EVENT_TYPE; private String implicitJoinKeyField = IMPLICIT_JOIN_KEY; private int fetchSize = FETCH_SIZE; private SearchAfterBuilder searchAfterBuilder; - private String rule; + private String query; - static final String KEY_QUERY = "query"; + static final String KEY_FILTER = "filter"; static final String KEY_TIMESTAMP_FIELD = "timestamp_field"; static final String KEY_EVENT_TYPE_FIELD = "event_type_field"; static final String KEY_IMPLICIT_JOIN_KEY_FIELD = "implicit_join_key_field"; static final String KEY_SIZE = "size"; static final String KEY_SEARCH_AFTER = "search_after"; - static final String KEY_RULE = "rule"; + static final String KEY_QUERY = "query"; - static final ParseField QUERY = new ParseField(KEY_QUERY); + static final ParseField FILTER = new ParseField(KEY_FILTER); static final ParseField TIMESTAMP_FIELD = new ParseField(KEY_TIMESTAMP_FIELD); static final ParseField EVENT_TYPE_FIELD = new ParseField(KEY_EVENT_TYPE_FIELD); static final ParseField IMPLICIT_JOIN_KEY_FIELD = new ParseField(KEY_IMPLICIT_JOIN_KEY_FIELD); static final ParseField SIZE = new ParseField(KEY_SIZE); static final ParseField SEARCH_AFTER = new ParseField(KEY_SEARCH_AFTER); - static final ParseField RULE = new ParseField(KEY_RULE); + static final ParseField QUERY = new ParseField(KEY_QUERY); private static final ObjectParser PARSER = objectParser(EqlSearchRequest::new); @@ -71,13 +71,13 @@ public EqlSearchRequest(StreamInput in) throws IOException { super(in); indices = in.readStringArray(); indicesOptions = IndicesOptions.readIndicesOptions(in); - query = in.readOptionalNamedWriteable(QueryBuilder.class); + filter = in.readOptionalNamedWriteable(QueryBuilder.class); 
timestampField = in.readString(); eventTypeField = in.readString(); implicitJoinKeyField = in.readString(); fetchSize = in.readVInt(); searchAfterBuilder = in.readOptionalWriteable(SearchAfterBuilder::new); - rule = in.readString(); + query = in.readString(); } @Override @@ -99,8 +99,8 @@ public ActionRequestValidationException validate() { validationException = addValidationError("indicesOptions is null", validationException); } - if (rule == null || rule.isEmpty()) { - validationException = addValidationError("rule is null or empty", validationException); + if (query == null || query.isEmpty()) { + validationException = addValidationError("query is null or empty", validationException); } if (timestampField == null || timestampField.isEmpty()) { @@ -124,8 +124,8 @@ @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - if (query != null) { - builder.field(KEY_QUERY, query); + if (filter != null) { + builder.field(KEY_FILTER, filter); } builder.field(KEY_TIMESTAMP_FIELD, timestampField()); builder.field(KEY_EVENT_TYPE_FIELD, eventTypeField()); @@ -138,7 +138,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.array(SEARCH_AFTER.getPreferredName(), searchAfterBuilder.getSortValues()); } - builder.field(KEY_RULE, rule); + builder.field(KEY_QUERY, query); return builder; } @@ -149,15 +149,15 @@ public static EqlSearchRequest fromXContent(XContentParser parser) { protected static <R extends EqlSearchRequest> ObjectParser<R, Void> objectParser(Supplier<R> supplier) { ObjectParser<R, Void> parser = new ObjectParser<>("eql/search", false, supplier); - parser.declareObject(EqlSearchRequest::query, - (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), QUERY); + parser.declareObject(EqlSearchRequest::filter, + (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), FILTER); parser.declareString(EqlSearchRequest::timestampField, TIMESTAMP_FIELD); parser.declareString(EqlSearchRequest::eventTypeField, EVENT_TYPE_FIELD); parser.declareString(EqlSearchRequest::implicitJoinKeyField, IMPLICIT_JOIN_KEY_FIELD); parser.declareInt(EqlSearchRequest::fetchSize, SIZE); parser.declareField(EqlSearchRequest::setSearchAfter, SearchAfterBuilder::fromXContent, SEARCH_AFTER, ObjectParser.ValueType.OBJECT_ARRAY); - parser.declareString(EqlSearchRequest::rule, RULE); + parser.declareString(EqlSearchRequest::query, QUERY); return parser; } @@ -167,10 +167,10 @@ public EqlSearchRequest indices(String...
indices) { return this; } - public QueryBuilder query() { return this.query; } + public QueryBuilder filter() { return this.filter; } - public EqlSearchRequest query(QueryBuilder query) { - this.query = query; + public EqlSearchRequest filter(QueryBuilder filter) { + this.filter = filter; return this; } @@ -219,10 +219,10 @@ private EqlSearchRequest setSearchAfter(SearchAfterBuilder builder) { return this; } - public String rule() { return this.rule; } + public String query() { return this.query; } - public EqlSearchRequest rule(String rule) { - this.rule = rule; + public EqlSearchRequest query(String query) { + this.query = query; return this; } @@ -231,13 +231,13 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeStringArrayNullable(indices); indicesOptions.writeIndicesOptions(out); - out.writeOptionalNamedWriteable(query); + out.writeOptionalNamedWriteable(filter); out.writeString(timestampField); out.writeString(eventTypeField); out.writeString(implicitJoinKeyField); out.writeVInt(fetchSize); out.writeOptionalWriteable(searchAfterBuilder); - out.writeString(rule); + out.writeString(query); } @Override @@ -249,16 +249,15 @@ public boolean equals(Object o) { return false; } EqlSearchRequest that = (EqlSearchRequest) o; - return - fetchSize == that.fetchSize && - Arrays.equals(indices, that.indices) && - Objects.equals(indicesOptions, that.indicesOptions) && - Objects.equals(query, that.query) && - Objects.equals(timestampField, that.timestampField) && - Objects.equals(eventTypeField, that.eventTypeField) && - Objects.equals(implicitJoinKeyField, that.implicitJoinKeyField) && - Objects.equals(searchAfterBuilder, that.searchAfterBuilder) && - Objects.equals(rule, that.rule); + return fetchSize == that.fetchSize && + Arrays.equals(indices, that.indices) && + Objects.equals(indicesOptions, that.indicesOptions) && + Objects.equals(filter, that.filter) && + Objects.equals(timestampField, that.timestampField) && + Objects.equals(eventTypeField, that.eventTypeField) && + Objects.equals(implicitJoinKeyField, that.implicitJoinKeyField) && + Objects.equals(searchAfterBuilder, that.searchAfterBuilder) && + Objects.equals(query, that.query); } @Override @@ -266,13 +265,13 @@ public int hashCode() { return Objects.hash( Arrays.hashCode(indices), indicesOptions, - query, + filter, fetchSize, timestampField, eventTypeField, implicitJoinKeyField, searchAfterBuilder, - rule); + query); } @Override diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchRequestBuilder.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchRequestBuilder.java index 2e808501ae9f8..743b297a58aba 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchRequestBuilder.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchRequestBuilder.java @@ -20,8 +20,8 @@ public EqlSearchRequestBuilder indices(String... 
indices) { return this; } - public EqlSearchRequestBuilder query(QueryBuilder query) { - request.query(query); + public EqlSearchRequestBuilder filter(QueryBuilder filter) { + request.filter(filter); return this; } @@ -50,8 +50,8 @@ public EqlSearchRequestBuilder searchAfter(Object[] values) { return this; } - public EqlSearchRequestBuilder rule(String rule) { - request.rule(rule); + public EqlSearchRequestBuilder query(String query) { + request.query(query); return this; } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/TransportEqlSearchAction.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/TransportEqlSearchAction.java index 13aa1d1f62c58..24a3cda7b8f5f 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/TransportEqlSearchAction.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/TransportEqlSearchAction.java @@ -56,7 +56,7 @@ public static void operation(PlanExecutor planExecutor, EqlSearchRequest request String clusterName, ActionListener<EqlSearchResponse> listener) { // TODO: these should be sent by the client ZoneId zoneId = DateUtils.of("Z"); - QueryBuilder filter = request.query(); + QueryBuilder filter = request.filter(); TimeValue timeout = TimeValue.timeValueSeconds(30); boolean includeFrozen = request.indicesOptions().ignoreThrottled() == false; String clientId = null; @@ -68,7 +68,7 @@ public static void operation(PlanExecutor planExecutor, EqlSearchRequest request Configuration cfg = new Configuration(request.indices(), zoneId, username, clusterName, filter, timeout, request.fetchSize(), includeFrozen, clientId); - planExecutor.eql(cfg, request.rule(), params, wrap(r -> listener.onResponse(createResponse(r)), listener::onFailure)); + planExecutor.eql(cfg, request.query(), params, wrap(r -> listener.onResponse(createResponse(r)), listener::onFailure)); } static EqlSearchResponse createResponse(Results results) { diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlActionIT.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlActionIT.java index 2ead24584572f..d0ff969968115 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlActionIT.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlActionIT.java @@ -98,7 +98,7 @@ public EqlActionIT(int num, EqlSpec spec) { public final void test() { EqlSearchResponse response = new EqlSearchRequestBuilder(client(), EqlSearchAction.INSTANCE) - .indices(testIndexName).rule(spec.query()).get(); + .indices(testIndexName).query(spec.query()).get(); List events = response.hits().events(); assertNotNull(events); diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlRequestParserTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlRequestParserTests.java index 0e9f551a5a1ec..15828b93b97ba 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlRequestParserTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlRequestParserTests.java @@ -31,7 +31,7 @@ public void testUnknownFieldParsingErrors() throws IOException { } public void testSearchRequestParser() throws IOException { - assertParsingErrorMessage("{\"query\" : 123}", "query doesn't support values of type: VALUE_NUMBER", + assertParsingErrorMessage("{\"filter\" : 123}", "filter doesn't support values of type: VALUE_NUMBER", EqlSearchRequest::fromXContent);
assertParsingErrorMessage("{\"timestamp_field\" : 123}", "timestamp_field doesn't support values of type: VALUE_NUMBER", EqlSearchRequest::fromXContent); @@ -43,32 +43,32 @@ public void testSearchRequestParser() throws IOException { assertParsingErrorMessage("{\"search_after\" : 123}", "search_after doesn't support values of type: VALUE_NUMBER", EqlSearchRequest::fromXContent); assertParsingErrorMessage("{\"size\" : \"foo\"}", "failed to parse field [size]", EqlSearchRequest::fromXContent); - assertParsingErrorMessage("{\"rule\" : 123}", "rule doesn't support values of type: VALUE_NUMBER", + assertParsingErrorMessage("{\"query\" : 123}", "query doesn't support values of type: VALUE_NUMBER", EqlSearchRequest::fromXContent); - assertParsingErrorMessage("{\"rule\" : \"whatever\", \"size\":\"abc\"}", "failed to parse field [size]", + assertParsingErrorMessage("{\"query\" : \"whatever\", \"size\":\"abc\"}", "failed to parse field [size]", EqlSearchRequest::fromXContent); - EqlSearchRequest request = generateRequest("endgame-*", "{\"query\" : {\"match\" : {\"foo\":\"bar\"}}, " + EqlSearchRequest request = generateRequest("endgame-*", "{\"filter\" : {\"match\" : {\"foo\":\"bar\"}}, " + "\"timestamp_field\" : \"tsf\", " + "\"event_type_field\" : \"etf\"," + "\"implicit_join_key_field\" : \"imjf\"," + "\"search_after\" : [ 12345678, \"device-20184\", \"/user/local/foo.exe\", \"2019-11-26T00:45:43.542\" ]," + "\"size\" : \"101\"," - + "\"rule\" : \"file where user != 'SYSTEM' by file_path\"" + + "\"query\" : \"file where user != 'SYSTEM' by file_path\"" + "}", EqlSearchRequest::fromXContent); assertArrayEquals(new String[]{"endgame-*"}, request.indices()); assertNotNull(request.query()); - assertTrue(request.query() instanceof MatchQueryBuilder); - MatchQueryBuilder query = (MatchQueryBuilder)request.query(); - assertEquals("foo", query.fieldName()); - assertEquals("bar", query.value()); + assertTrue(request.filter() instanceof MatchQueryBuilder); + MatchQueryBuilder filter = (MatchQueryBuilder)request.filter(); + assertEquals("foo", filter.fieldName()); + assertEquals("bar", filter.value()); assertEquals("tsf", request.timestampField()); assertEquals("etf", request.eventTypeField()); assertEquals("imjf", request.implicitJoinKeyField()); assertArrayEquals(new Object[]{12345678, "device-20184", "/user/local/foo.exe", "2019-11-26T00:45:43.542"}, request.searchAfter()); assertEquals(101, request.fetchSize()); - assertEquals("file where user != 'SYSTEM' by file_path", request.rule()); + assertEquals("file where user != 'SYSTEM' by file_path", request.query()); } private EqlSearchRequest generateRequest(String index, String json, Function fromXContent) diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchRequestTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchRequestTests.java index 91c70f29f23cd..008355b2da4fb 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchRequestTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchRequestTests.java @@ -32,7 +32,7 @@ public class EqlSearchRequestTests extends AbstractSerializingTestCase { // TODO: possibly add mutations - static String defaultTestQuery = "{\n" + + static String defaultTestFilter = "{\n" + " \"match\" : {\n" + " \"foo\": \"bar\"\n" + " }" + @@ -59,15 +59,15 @@ protected NamedXContentRegistry xContentRegistry() { @Override protected EqlSearchRequest createTestInstance() { try { - QueryBuilder query = 
parseQuery(defaultTestQuery); + QueryBuilder filter = parseFilter(defaultTestFilter); EqlSearchRequest request = new EqlSearchRequest() .indices(new String[]{defaultTestIndex}) - .query(query) + .filter(filter) .timestampField(randomAlphaOfLength(10)) .eventTypeField(randomAlphaOfLength(10)) .implicitJoinKeyField(randomAlphaOfLength(10)) .fetchSize(randomIntBetween(1, 50)) - .rule(randomAlphaOfLength(10)); + .query(randomAlphaOfLength(10)); if (randomBoolean()) { request.searchAfter(randomJsonSearchFromBuilder()); @@ -79,12 +79,12 @@ protected EqlSearchRequest createTestInstance() { return null; } - protected QueryBuilder parseQuery(String queryAsString) throws IOException { - XContentParser parser = createParser(JsonXContent.jsonXContent, queryAsString); - return parseQuery(parser); + protected QueryBuilder parseFilter(String filter) throws IOException { + XContentParser parser = createParser(JsonXContent.jsonXContent, filter); + return parseFilter(parser); } - protected QueryBuilder parseQuery(XContentParser parser) throws IOException { + protected QueryBuilder parseFilter(XContentParser parser) throws IOException { QueryBuilder parseInnerQueryBuilder = parseInnerQueryBuilder(parser); assertNull(parser.nextToken()); return parseInnerQueryBuilder; diff --git a/x-pack/plugin/mapper-constant-keyword/build.gradle b/x-pack/plugin/mapper-constant-keyword/build.gradle new file mode 100644 index 0000000000000..ba4e0d1b2a757 --- /dev/null +++ b/x-pack/plugin/mapper-constant-keyword/build.gradle @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +evaluationDependsOn(xpackModule('core')) + +apply plugin: 'elasticsearch.esplugin' + +esplugin { + name 'constant-keyword' + description 'Module for the constant-keyword field type, which is a specialization of keyword for the case when all documents have the same value.' + classname 'org.elasticsearch.xpack.constantkeyword.ConstantKeywordMapperPlugin' + extendedPlugins = ['x-pack-core'] +} +archivesBaseName = 'x-pack-constant-keyword' + +dependencies { + compileOnly project(path: xpackModule('core'), configuration: 'default') + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') +} + +integTest.enabled = false diff --git a/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/ConstantKeywordInfoTransportAction.java b/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/ConstantKeywordInfoTransportAction.java new file mode 100644 index 0000000000000..621138d1e0230 --- /dev/null +++ b/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/ConstantKeywordInfoTransportAction.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.constantkeyword; + +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.action.XPackInfoFeatureAction; +import org.elasticsearch.xpack.core.action.XPackInfoFeatureTransportAction; + +public class ConstantKeywordInfoTransportAction extends XPackInfoFeatureTransportAction { + + private final XPackLicenseState licenseState; + + @Inject + public ConstantKeywordInfoTransportAction(TransportService transportService, ActionFilters actionFilters, + Settings settings, XPackLicenseState licenseState) { + super(XPackInfoFeatureAction.CONSTANT_KEYWORD.name(), transportService, actionFilters); + this.licenseState = licenseState; + } + + @Override + public String name() { + return XPackField.CONSTANT_KEYWORD; + } + + @Override + public boolean available() { + return licenseState.isConstantKeywordAllowed(); + } + + @Override + public boolean enabled() { + return true; + } + +} diff --git a/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/ConstantKeywordMapperPlugin.java b/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/ConstantKeywordMapperPlugin.java new file mode 100644 index 0000000000000..b129d3074dc31 --- /dev/null +++ b/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/ConstantKeywordMapperPlugin.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.constantkeyword; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.mapper.Mapper; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.MapperPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.xpack.constantkeyword.mapper.ConstantKeywordFieldMapper; +import org.elasticsearch.xpack.core.action.XPackInfoFeatureAction; + +import java.util.Arrays; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.singletonMap; + +public class ConstantKeywordMapperPlugin extends Plugin implements MapperPlugin, ActionPlugin { + + public ConstantKeywordMapperPlugin(Settings settings) {} + + @Override + public Map<String, Mapper.TypeParser> getMappers() { + return singletonMap(ConstantKeywordFieldMapper.CONTENT_TYPE, new ConstantKeywordFieldMapper.TypeParser()); + } + + @Override + public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() { + return Arrays.asList( + new ActionHandler<>(XPackInfoFeatureAction.CONSTANT_KEYWORD, ConstantKeywordInfoTransportAction.class)); + } +} diff --git a/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java b/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java new file mode 100644 index 0000000000000..115968b69ee65 --- /dev/null +++ b/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java @@ -0,0 +1,308 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ + + +package org.elasticsearch.xpack.constantkeyword.mapper; + +import java.io.IOException; +import java.time.ZoneId; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.MultiTermQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.UnicodeUtil; +import org.apache.lucene.util.automaton.Automaton; +import org.apache.lucene.util.automaton.CharacterRunAutomaton; +import org.apache.lucene.util.automaton.LevenshteinAutomata; +import org.apache.lucene.util.automaton.RegExp; +import org.elasticsearch.common.geo.ShapeRelation; +import org.elasticsearch.common.lucene.BytesRefs; +import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.time.DateMathParser; +import org.elasticsearch.common.unit.Fuzziness; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.plain.ConstantIndexFieldData; +import org.elasticsearch.index.mapper.ConstantFieldType; +import org.elasticsearch.index.mapper.FieldMapper; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.Mapper; +import org.elasticsearch.index.mapper.MapperParsingException; +import org.elasticsearch.index.mapper.ParseContext; +import org.elasticsearch.index.mapper.TypeParsers; +import org.elasticsearch.index.query.QueryShardContext; + +/** + * A {@link FieldMapper} that assigns every document the same value. 
+ */ +public class ConstantKeywordFieldMapper extends FieldMapper { + + public static final String CONTENT_TYPE = "constant_keyword"; + + public static class Defaults { + public static final MappedFieldType FIELD_TYPE = new ConstantKeywordFieldType(); + static { + FIELD_TYPE.setIndexOptions(IndexOptions.NONE); + FIELD_TYPE.freeze(); + } + } + + public static class Builder extends FieldMapper.Builder<Builder, ConstantKeywordFieldMapper> { + + public Builder(String name) { + super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); + builder = this; + } + + public Builder setValue(String value) { + fieldType().setValue(value); + return this; + } + + @Override + public ConstantKeywordFieldType fieldType() { + return (ConstantKeywordFieldType) super.fieldType(); + } + + @Override + public ConstantKeywordFieldMapper build(BuilderContext context) { + setupFieldType(context); + return new ConstantKeywordFieldMapper( + name, fieldType, defaultFieldType, + context.indexSettings()); + } + } + + public static class TypeParser implements Mapper.TypeParser { + @Override + public Mapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException { + Object value = null; + if (node.containsKey("value")) { + value = node.remove("value"); + if (value == null) { + throw new MapperParsingException("Property [value] of field [" + name + "] can't be [null]."); + } + if (value instanceof Number == false && value instanceof CharSequence == false) { + throw new MapperParsingException("Property [value] of field [" + name + + "] must be a number or a string, but got [" + value + "]"); + } + } + ConstantKeywordFieldMapper.Builder builder = new ConstantKeywordFieldMapper.Builder(name); + if (value != null) { + builder.setValue(value.toString()); + } + TypeParsers.parseMeta(builder, name, node); + return builder; + } + } + + public static final class ConstantKeywordFieldType extends ConstantFieldType { + + private String value; + + public ConstantKeywordFieldType() { + super(); + } + + protected ConstantKeywordFieldType(ConstantKeywordFieldType ref) { + super(ref); + this.value = ref.value; + } + + public ConstantKeywordFieldType clone() { + return new ConstantKeywordFieldType(this); + } + + @Override + public boolean equals(Object o) { + if (super.equals(o) == false) { + return false; + } + ConstantKeywordFieldType other = (ConstantKeywordFieldType) o; + return Objects.equals(value, other.value); + } + + @Override + public void checkCompatibility(MappedFieldType newFT, List<String> conflicts) { + super.checkCompatibility(newFT, conflicts); + ConstantKeywordFieldType newConstantKeywordFT = (ConstantKeywordFieldType) newFT; + if (this.value != null) { + if (newConstantKeywordFT.value == null) { + conflicts.add("mapper [" + name() + "] cannot unset [value]"); + } else if (Objects.equals(value, newConstantKeywordFT.value) == false) { + conflicts.add("mapper [" + name() + "] has different [value] from the value that is configured in mappings: [" + value + + "] vs. [" + newConstantKeywordFT.value + "]"); + } + } + } + + @Override + public int hashCode() { + return 31 * super.hashCode() + Objects.hashCode(value); + } + + /** Return the value that this field wraps. This may be {@code null} if the field is not configured yet. */ + public String value() { + return value; + } + + /** Set the value.
*/ + public void setValue(String value) { + checkIfFrozen(); + this.value = Objects.requireNonNull(value); + } + + @Override + public String typeName() { + return CONTENT_TYPE; + } + + @Override + public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName) { + return new ConstantIndexFieldData.Builder(mapperService -> value); + } + + @Override + protected boolean matches(String pattern, QueryShardContext context) { + if (value == null) { + return false; + } + return Regex.simpleMatch(pattern, value); + } + + @Override + public Query rangeQuery( + Object lowerTerm, Object upperTerm, + boolean includeLower, boolean includeUpper, + ShapeRelation relation, ZoneId timeZone, DateMathParser parser, + QueryShardContext context) { + if (this.value == null) { + return new MatchNoDocsQuery(); + } + + final BytesRef valueAsBytesRef = new BytesRef(value); + if (lowerTerm != null && BytesRefs.toBytesRef(lowerTerm).compareTo(valueAsBytesRef) >= (includeLower ? 1 : 0)) { + return new MatchNoDocsQuery(); + } + if (upperTerm != null && valueAsBytesRef.compareTo(BytesRefs.toBytesRef(upperTerm)) >= (includeUpper ? 1 : 0)) { + return new MatchNoDocsQuery(); + } + return new MatchAllDocsQuery(); + } + + @Override + public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, + boolean transpositions, QueryShardContext context) { + if (this.value == null) { + return new MatchNoDocsQuery(); + } + + final String termAsString = BytesRefs.toString(value); + final int maxEdits = fuzziness.asDistance(termAsString); + + final int[] termText = new int[termAsString.codePointCount(0, termAsString.length())]; + for (int cp, i = 0, j = 0; i < termAsString.length(); i += Character.charCount(cp)) { + termText[j++] = cp = termAsString.codePointAt(i); + } + final int termLength = termText.length; + + prefixLength = Math.min(prefixLength, termLength); + final String suffix = UnicodeUtil.newString(termText, prefixLength, termText.length - prefixLength); + final LevenshteinAutomata builder = new LevenshteinAutomata(suffix, transpositions); + final String prefix = UnicodeUtil.newString(termText, 0, prefixLength); + final Automaton automaton = builder.toAutomaton(maxEdits, prefix); + + final CharacterRunAutomaton runAutomaton = new CharacterRunAutomaton(automaton); + if (runAutomaton.run(this.value)) { + return new MatchAllDocsQuery(); + } else { + return new MatchNoDocsQuery(); + } + } + + @Override + public Query regexpQuery(String value, int flags, int maxDeterminizedStates, + MultiTermQuery.RewriteMethod method, QueryShardContext context) { + if (this.value == null) { + return new MatchNoDocsQuery(); + } + + final Automaton automaton = new RegExp(value, flags).toAutomaton(maxDeterminizedStates); + final CharacterRunAutomaton runAutomaton = new CharacterRunAutomaton(automaton); + if (runAutomaton.run(this.value)) { + return new MatchAllDocsQuery(); + } else { + return new MatchNoDocsQuery(); + } + } + + } + + ConstantKeywordFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, + Settings indexSettings) { + super(simpleName, fieldType, defaultFieldType, indexSettings, MultiFields.empty(), CopyTo.empty()); + } + + @Override + protected ConstantKeywordFieldMapper clone() { + return (ConstantKeywordFieldMapper) super.clone(); + } + + @Override + public ConstantKeywordFieldType fieldType() { + return (ConstantKeywordFieldType) super.fieldType(); + } + + @Override + protected void parseCreateField(ParseContext context, List<IndexableField> fields) throws
IOException { + String value; + if (context.externalValueSet()) { + value = context.externalValue().toString(); + } else { + XContentParser parser = context.parser(); + value = parser.textOrNull(); + } + + if (value == null) { + throw new IllegalArgumentException("[constant_keyword] field [" + name() + "] doesn't accept [null] values"); + } + + if (fieldType().value == null) { + ConstantKeywordFieldType newFieldType = new ConstantKeywordFieldType(fieldType()); + newFieldType.setValue(value); + newFieldType.freeze(); + Mapper update = new ConstantKeywordFieldMapper( + simpleName(), newFieldType, defaultFieldType, context.indexSettings().getSettings()); + context.addDynamicMapper(update); + } else if (Objects.equals(fieldType().value, value) == false) { + throw new IllegalArgumentException("[constant_keyword] field [" + name() + + "] only accepts values that are equal to the value defined in the mappings [" + fieldType().value() + + "], but got [" + value + "]"); + } + } + + @Override + protected String contentType() { + return CONTENT_TYPE; + } + + @Override + protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException { + super.doXContentBody(builder, includeDefaults, params); + if (fieldType().value() != null) { + builder.field("value", fieldType().value()); + } + } +} diff --git a/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapperTests.java b/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapperTests.java new file mode 100644 index 0000000000000..587f032563b2d --- /dev/null +++ b/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapperTests.java @@ -0,0 +1,110 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.constantkeyword.mapper; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.mapper.DocumentMapper; +import org.elasticsearch.index.mapper.MapperParsingException; +import org.elasticsearch.index.mapper.MapperService.MergeReason; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.SourceToParse; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.xpack.constantkeyword.ConstantKeywordMapperPlugin; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; + +import java.util.Collection; +import java.util.Collections; + +public class ConstantKeywordFieldMapperTests extends ESSingleNodeTestCase { + + @Override + protected Collection<Class<? extends Plugin>> getPlugins() { + return pluginList(ConstantKeywordMapperPlugin.class, LocalStateCompositeXPackPlugin.class); + } + + public void testDefaults() throws Exception { + IndexService indexService = createIndex("test"); + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") + .startObject("properties").startObject("field").field("type", "constant_keyword") + .field("value", "foo").endObject().endObject().endObject().endObject()); + DocumentMapper mapper = indexService.mapperService().merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); + assertEquals(mapping, mapper.mappingSource().toString()); + + BytesReference source = BytesReference.bytes(XContentFactory.jsonBuilder().startObject().endObject()); + ParsedDocument doc = mapper.parse(new SourceToParse("test", "1", source, XContentType.JSON)); + assertNull(doc.rootDoc().getField("field")); + + source = BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("field", "foo").endObject()); + doc = mapper.parse(new SourceToParse("test", "1", source, XContentType.JSON)); + assertNull(doc.rootDoc().getField("field")); + + BytesReference illegalSource = BytesReference.bytes(XContentFactory.jsonBuilder() + .startObject().field("field", "bar").endObject()); + MapperParsingException e = expectThrows(MapperParsingException.class, + () -> mapper.parse(new SourceToParse("test", "1", illegalSource, XContentType.JSON))); + assertEquals("[constant_keyword] field [field] only accepts values that are equal to the value defined in the mappings [foo], " + + "but got [bar]", e.getCause().getMessage()); + } + + public void testDynamicValue() throws Exception { + IndexService indexService = createIndex("test"); + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") + .startObject("properties").startObject("field").field("type", "constant_keyword") + .endObject().endObject().endObject().endObject()); + DocumentMapper mapper = indexService.mapperService().merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); + assertEquals(mapping, mapper.mappingSource().toString()); + + BytesReference source = BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("field", "foo").endObject()); + ParsedDocument doc = mapper.parse(new SourceToParse("test", "1", source, XContentType.JSON)); + assertNull(doc.rootDoc().getField("field")); +
assertNotNull(doc.dynamicMappingsUpdate()); + + CompressedXContent mappingUpdate = new CompressedXContent(Strings.toString(doc.dynamicMappingsUpdate())); + DocumentMapper updatedMapper = indexService.mapperService().merge("_doc", mappingUpdate, MergeReason.MAPPING_UPDATE); + String expectedMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") + .startObject("properties").startObject("field").field("type", "constant_keyword") + .field("value", "foo").endObject().endObject().endObject().endObject()); + assertEquals(expectedMapping, updatedMapper.mappingSource().toString()); + + doc = updatedMapper.parse(new SourceToParse("test", "1", source, XContentType.JSON)); + assertNull(doc.rootDoc().getField("field")); + assertNull(doc.dynamicMappingsUpdate()); + } + + public void testMeta() throws Exception { + IndexService indexService = createIndex("test"); + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") + .startObject("properties").startObject("field").field("type", "constant_keyword") + .field("meta", Collections.singletonMap("foo", "bar")) + .endObject().endObject().endObject().endObject()); + + DocumentMapper mapper = indexService.mapperService().merge("_doc", + new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); + assertEquals(mapping, mapper.mappingSource().toString()); + + String mapping2 = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") + .startObject("properties").startObject("field").field("type", "constant_keyword") + .endObject().endObject().endObject().endObject()); + mapper = indexService.mapperService().merge("_doc", + new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE); + assertEquals(mapping2, mapper.mappingSource().toString()); + + String mapping3 = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc") + .startObject("properties").startObject("field").field("type", "constant_keyword") + .field("meta", Collections.singletonMap("baz", "quux")) + .endObject().endObject().endObject().endObject()); + mapper = indexService.mapperService().merge("_doc", + new CompressedXContent(mapping3), MergeReason.MAPPING_UPDATE); + assertEquals(mapping3, mapper.mappingSource().toString()); + } +} diff --git a/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldTypeTests.java b/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldTypeTests.java new file mode 100644 index 0000000000000..45843d3b3102f --- /dev/null +++ b/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldTypeTests.java @@ -0,0 +1,130 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.constantkeyword.mapper; + +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.util.automaton.RegExp; +import org.elasticsearch.common.unit.Fuzziness; +import org.elasticsearch.index.mapper.FieldTypeTestCase; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.xpack.constantkeyword.mapper.ConstantKeywordFieldMapper.ConstantKeywordFieldType; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +public class ConstantKeywordFieldTypeTests extends FieldTypeTestCase { + + @Before + public void setupProperties() { + addModifier(new Modifier("value", false) { + @Override + public void modify(MappedFieldType type) { + ((ConstantKeywordFieldType) type).setValue("bar"); + } + }); + } + + public void testSetValue() { + ConstantKeywordFieldType ft1 = new ConstantKeywordFieldType(); + ft1.setName("field"); + ConstantKeywordFieldType ft2 = new ConstantKeywordFieldType(); + ft2.setName("field"); + ft2.setValue("bar"); + List<String> conflicts = new ArrayList<>(); + ft1.checkCompatibility(ft2, conflicts); + assertEquals(Collections.emptyList(), conflicts); + } + + public void testUnsetValue() { + ConstantKeywordFieldType ft1 = new ConstantKeywordFieldType(); + ft1.setName("field"); + ft1.setValue("foo"); + ConstantKeywordFieldType ft2 = new ConstantKeywordFieldType(); + ft2.setName("field"); + List<String> conflicts = new ArrayList<>(); + ft1.checkCompatibility(ft2, conflicts); + assertEquals(Collections.singletonList("mapper [field] cannot unset [value]"), conflicts); + } + + @Override + protected MappedFieldType createDefaultFieldType() { + ConstantKeywordFieldType ft = new ConstantKeywordFieldType(); + ft.setValue("foo"); + return ft; + } + + public void testTermQuery() { + ConstantKeywordFieldType ft = new ConstantKeywordFieldType(); + assertEquals(new MatchNoDocsQuery(), ft.termQuery("foo", null)); + ft.setValue("foo"); + assertEquals(new MatchAllDocsQuery(), ft.termQuery("foo", null)); + assertEquals(new MatchNoDocsQuery(), ft.termQuery("bar", null)); + } + + public void testTermsQuery() { + ConstantKeywordFieldType ft = new ConstantKeywordFieldType(); + assertEquals(new MatchNoDocsQuery(), ft.termsQuery(Collections.singletonList("foo"), null)); + ft.setValue("foo"); + assertEquals(new MatchAllDocsQuery(), ft.termsQuery(Collections.singletonList("foo"), null)); + assertEquals(new MatchAllDocsQuery(), ft.termsQuery(Arrays.asList("bar", "foo", "quux"), null)); + assertEquals(new MatchNoDocsQuery(), ft.termsQuery(Collections.emptyList(), null)); + assertEquals(new MatchNoDocsQuery(), ft.termsQuery(Collections.singletonList("bar"), null)); + assertEquals(new MatchNoDocsQuery(), ft.termsQuery(Arrays.asList("bar", "quux"), null)); + } + + public void testWildcardQuery() { + ConstantKeywordFieldType ft = new ConstantKeywordFieldType(); + assertEquals(new MatchNoDocsQuery(), ft.wildcardQuery("f*o", null, null)); + ft.setValue("foo"); + assertEquals(new MatchAllDocsQuery(), ft.wildcardQuery("f*o", null, null)); + assertEquals(new MatchNoDocsQuery(), ft.wildcardQuery("b*r", null, null)); + } + + public void testPrefixQuery() { + ConstantKeywordFieldType ft = new ConstantKeywordFieldType(); + assertEquals(new MatchNoDocsQuery(), ft.prefixQuery("fo", null, null)); + ft.setValue("foo"); + assertEquals(new MatchAllDocsQuery(), ft.prefixQuery("fo", null, null)); + assertEquals(new MatchNoDocsQuery(),
ft.prefixQuery("ba", null, null)); + } + + public void testRangeQuery() { + ConstantKeywordFieldType ft = new ConstantKeywordFieldType(); + assertEquals(new MatchNoDocsQuery(), ft.rangeQuery(null, null, randomBoolean(), randomBoolean(), null, null, null, null)); + assertEquals(new MatchNoDocsQuery(), ft.rangeQuery(null, "foo", randomBoolean(), randomBoolean(), null, null, null, null)); + assertEquals(new MatchNoDocsQuery(), ft.rangeQuery("foo", null, randomBoolean(), randomBoolean(), null, null, null, null)); + ft.setValue("foo"); + assertEquals(new MatchAllDocsQuery(), ft.rangeQuery(null, null, randomBoolean(), randomBoolean(), null, null, null, null)); + assertEquals(new MatchAllDocsQuery(), ft.rangeQuery("foo", null, true, randomBoolean(), null, null, null, null)); + assertEquals(new MatchNoDocsQuery(), ft.rangeQuery("foo", null, false, randomBoolean(), null, null, null, null)); + assertEquals(new MatchAllDocsQuery(), ft.rangeQuery(null, "foo", randomBoolean(), true, null, null, null, null)); + assertEquals(new MatchNoDocsQuery(), ft.rangeQuery(null, "foo", randomBoolean(), false, null, null, null, null)); + assertEquals(new MatchAllDocsQuery(), ft.rangeQuery("abc", "xyz", randomBoolean(), randomBoolean(), null, null, null, null)); + assertEquals(new MatchNoDocsQuery(), ft.rangeQuery("abc", "def", randomBoolean(), randomBoolean(), null, null, null, null)); + assertEquals(new MatchNoDocsQuery(), ft.rangeQuery("mno", "xyz", randomBoolean(), randomBoolean(), null, null, null, null)); + } + + public void testFuzzyQuery() { + ConstantKeywordFieldType ft = new ConstantKeywordFieldType(); + assertEquals(new MatchNoDocsQuery(), ft.fuzzyQuery("fooquux", Fuzziness.AUTO, 3, 50, randomBoolean(), null)); + ft.setValue("foobar"); + assertEquals(new MatchAllDocsQuery(), ft.fuzzyQuery("foobaz", Fuzziness.AUTO, 3, 50, randomBoolean(), null)); + assertEquals(new MatchNoDocsQuery(), ft.fuzzyQuery("fooquux", Fuzziness.AUTO, 3, 50, randomBoolean(), null)); + } + + public void testRegexpQuery() { + ConstantKeywordFieldType ft = new ConstantKeywordFieldType(); + assertEquals(new MatchNoDocsQuery(), ft.regexpQuery("f..o", RegExp.ALL, 10, null, null)); + ft.setValue("foo"); + assertEquals(new MatchAllDocsQuery(), ft.regexpQuery("f.o", RegExp.ALL, 10, null, null)); + assertEquals(new MatchNoDocsQuery(), ft.regexpQuery("f..o", RegExp.ALL, 10, null, null)); + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlIndexTemplateRegistry.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlIndexTemplateRegistry.java index be52cc9202c78..ae516b4f8ce7a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlIndexTemplateRegistry.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlIndexTemplateRegistry.java @@ -14,6 +14,7 @@ import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.ml.MlConfigIndex; import org.elasticsearch.xpack.core.ml.MlMetaIndex; +import org.elasticsearch.xpack.core.ml.MlStatsIndex; import org.elasticsearch.xpack.core.ml.inference.persistence.InferenceIndexConstants; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndexFields; @@ -56,6 +57,8 @@ public class MlIndexTemplateRegistry extends IndexTemplateRegistry { ROOT_RESOURCE_PATH + "inference_index_template.json", Version.CURRENT.id, VERSION_PATTERN, Collections.singletonMap(VERSION_ID_PATTERN, String.valueOf(Version.CURRENT.id))); + private static 
final IndexTemplateConfig STATS_TEMPLATE = statsTemplate(); + private static IndexTemplateConfig configTemplate() { Map variables = new HashMap<>(); variables.put(VERSION_ID_PATTERN, String.valueOf(Version.CURRENT.id)); @@ -80,6 +83,17 @@ private static IndexTemplateConfig anomalyDetectionResultsTemplate() { variables); } + private static IndexTemplateConfig statsTemplate() { + Map variables = new HashMap<>(); + variables.put(VERSION_ID_PATTERN, String.valueOf(Version.CURRENT.id)); + variables.put("xpack.ml.stats.mappings", MlStatsIndex.mapping()); + + return new IndexTemplateConfig(MlStatsIndex.TEMPLATE_NAME, + ROOT_RESOURCE_PATH + "stats_index_template.json", + Version.CURRENT.id, VERSION_PATTERN, + variables); + } + public MlIndexTemplateRegistry(Settings nodeSettings, ClusterService clusterService, ThreadPool threadPool, Client client, NamedXContentRegistry xContentRegistry) { super(nodeSettings, clusterService, threadPool, client, xContentRegistry); @@ -98,7 +112,8 @@ protected List getTemplateConfigs() { CONFIG_TEMPLATE, INFERENCE_TEMPLATE, META_TEMPLATE, - NOTIFICATIONS_TEMPLATE + NOTIFICATIONS_TEMPLATE, + STATS_TEMPLATE ); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDataFrameAnalyticsStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDataFrameAnalyticsStatsAction.java index 08edea3329813..d2c8271cb4f1a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDataFrameAnalyticsStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDataFrameAnalyticsStatsAction.java @@ -7,7 +7,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; @@ -22,20 +21,19 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.util.concurrent.AtomicArray; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.sort.SortBuilders; +import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.action.util.QueryPage; +import org.elasticsearch.xpack.core.ml.MlStatsIndex; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.GetDataFrameAnalyticsAction; import org.elasticsearch.xpack.core.ml.action.GetDataFrameAnalyticsStatsAction; @@ -43,20 +41,22 @@ import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; import 
org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsState; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsTaskState; +import org.elasticsearch.xpack.core.ml.dataframe.stats.MemoryUsage; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.ml.utils.PhaseProgress; import org.elasticsearch.xpack.ml.dataframe.DataFrameAnalyticsTask; import org.elasticsearch.xpack.ml.dataframe.StoredProgress; import org.elasticsearch.xpack.ml.dataframe.stats.ProgressTracker; +import org.elasticsearch.xpack.ml.dataframe.stats.StatsHolder; +import org.elasticsearch.xpack.ml.utils.persistence.MlParserUtils; -import java.io.IOException; -import java.io.InputStream; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.List; import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; @@ -98,16 +98,20 @@ protected void taskOperation(GetDataFrameAnalyticsStatsAction.Request request, D ActionListener> listener) { logger.debug("Get stats for running task [{}]", task.getParams().getId()); - ActionListener> progressListener = ActionListener.wrap( - progress -> { - Stats stats = buildStats(task.getParams().getId(), progress); + ActionListener statsHolderListener = ActionListener.wrap( + statsHolder -> { + Stats stats = buildStats( + task.getParams().getId(), + statsHolder.getProgressTracker().report(), + statsHolder.getMemoryUsage() + ); listener.onResponse(new QueryPage<>(Collections.singletonList(stats), 1, GetDataFrameAnalyticsAction.Response.RESULTS_FIELD)); }, listener::onFailure ); ActionListener reindexingProgressListener = ActionListener.wrap( - aVoid -> progressListener.onResponse(task.getStatsHolder().getProgressTracker().report()), + aVoid -> statsHolderListener.onResponse(task.getStatsHolder()), listener::onFailure ); @@ -157,22 +161,25 @@ void gatherStatsForStoppedTasks(List expandedIds, GetDataFrameAnalyticsS return; } - searchStoredProgresses(stoppedTasksIds, ActionListener.wrap( - storedProgresses -> { - List stoppedStats = new ArrayList<>(stoppedTasksIds.size()); - for (int i = 0; i < stoppedTasksIds.size(); i++) { - String configId = stoppedTasksIds.get(i); - StoredProgress storedProgress = storedProgresses.get(i); - stoppedStats.add(buildStats(configId, storedProgress.get())); - } - List allTasksStats = new ArrayList<>(runningTasksResponse.getResponse().results()); - allTasksStats.addAll(stoppedStats); - Collections.sort(allTasksStats, Comparator.comparing(Stats::getId)); - listener.onResponse(new GetDataFrameAnalyticsStatsAction.Response(new QueryPage<>( - allTasksStats, allTasksStats.size(), GetDataFrameAnalyticsAction.Response.RESULTS_FIELD))); - }, - listener::onFailure - )); + AtomicInteger counter = new AtomicInteger(stoppedTasksIds.size()); + AtomicArray jobStats = new AtomicArray<>(stoppedTasksIds.size()); + for (int i = 0; i < stoppedTasksIds.size(); i++) { + final int slot = i; + String jobId = stoppedTasksIds.get(i); + searchStats(jobId, ActionListener.wrap( + stats -> { + jobStats.set(slot, stats); + if (counter.decrementAndGet() == 0) { + List allTasksStats = new ArrayList<>(runningTasksResponse.getResponse().results()); + allTasksStats.addAll(jobStats.asList()); + Collections.sort(allTasksStats, Comparator.comparing(Stats::getId)); + listener.onResponse(new 
GetDataFrameAnalyticsStatsAction.Response(new QueryPage<>( + allTasksStats, allTasksStats.size(), GetDataFrameAnalyticsAction.Response.RESULTS_FIELD))); + } + }, + listener::onFailure) + ); + } } static List determineStoppedTasksIds(List expandedIds, List runningTasksStats) { @@ -180,19 +187,15 @@ static List determineStoppedTasksIds(List expandedIds, List startedTasksIds.contains(id) == false).collect(Collectors.toList()); } - private void searchStoredProgresses(List configIds, ActionListener> listener) { + private void searchStats(String configId, ActionListener listener) { + RetrievedStatsHolder retrievedStatsHolder = new RetrievedStatsHolder(); + MultiSearchRequest multiSearchRequest = new MultiSearchRequest(); - for (String configId : configIds) { - SearchRequest searchRequest = new SearchRequest(AnomalyDetectorsIndex.jobStateIndexPattern()); - searchRequest.indicesOptions(IndicesOptions.lenientExpandOpen()); - searchRequest.source().size(1); - searchRequest.source().query(QueryBuilders.idsQuery().addIds(StoredProgress.documentId(configId))); - multiSearchRequest.add(searchRequest); - } + multiSearchRequest.add(buildStoredProgressSearch(configId)); + multiSearchRequest.add(buildMemoryUsageSearch(configId)); executeAsyncWithOrigin(client, ML_ORIGIN, MultiSearchAction.INSTANCE, multiSearchRequest, ActionListener.wrap( multiSearchResponse -> { - List progresses = new ArrayList<>(configIds.size()); for (MultiSearchResponse.Item itemResponse : multiSearchResponse.getResponses()) { if (itemResponse.isFailure()) { listener.onFailure(ExceptionsHelper.serverError(itemResponse.getFailureMessage(), itemResponse.getFailure())); @@ -200,32 +203,59 @@ private void searchStoredProgresses(List configIds, ActionListener listener.onFailure(ExceptionsHelper.serverError("Error searching for stored progresses", e)) + e -> listener.onFailure(ExceptionsHelper.serverError("Error searching for stats", e)) )); } - private StoredProgress parseStoredProgress(SearchHit hit) { - BytesReference source = hit.getSourceRef(); - try (InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { - StoredProgress storedProgress = StoredProgress.PARSER.apply(parser, null); - return storedProgress; - } catch (IOException e) { - logger.error(new ParameterizedMessage("failed to parse progress from doc with it [{}]", hit.getId()), e); - return new StoredProgress(Collections.emptyList()); + private static SearchRequest buildStoredProgressSearch(String configId) { + SearchRequest searchRequest = new SearchRequest(AnomalyDetectorsIndex.jobStateIndexPattern()); + searchRequest.indicesOptions(IndicesOptions.lenientExpandOpen()); + searchRequest.source().size(1); + searchRequest.source().query(QueryBuilders.idsQuery().addIds(StoredProgress.documentId(configId))); + return searchRequest; + } + + private static SearchRequest buildMemoryUsageSearch(String configId) { + SearchRequest searchRequest = new SearchRequest(MlStatsIndex.indexPattern()); + searchRequest.indicesOptions(IndicesOptions.lenientExpandOpen()); + searchRequest.source().size(1); + QueryBuilder query = QueryBuilders.boolQuery() + .filter(QueryBuilders.termQuery(MemoryUsage.JOB_ID.getPreferredName(), configId)) + .filter(QueryBuilders.termQuery(MemoryUsage.TYPE.getPreferredName(), MemoryUsage.TYPE_VALUE)); + searchRequest.source().query(query); + 
searchRequest.source().sort(SortBuilders.fieldSort(MemoryUsage.TIMESTAMP.getPreferredName()).order(SortOrder.DESC) + // We need this for the search not to fail when there are no mappings yet in the index + .unmappedType("long")); + return searchRequest; + } + + private static void parseHit(SearchHit hit, String configId, RetrievedStatsHolder retrievedStatsHolder) { + String hitId = hit.getId(); + if (StoredProgress.documentId(configId).equals(hitId)) { + retrievedStatsHolder.progress = MlParserUtils.parse(hit, StoredProgress.PARSER); + } else if (hitId.startsWith(MemoryUsage.documentIdPrefix(configId))) { + retrievedStatsHolder.memoryUsage = MlParserUtils.parse(hit, MemoryUsage.LENIENT_PARSER); + } else { + throw ExceptionsHelper.serverError("unexpected doc id [" + hitId + "]"); } } - private GetDataFrameAnalyticsStatsAction.Response.Stats buildStats(String concreteAnalyticsId, List progress) { + private GetDataFrameAnalyticsStatsAction.Response.Stats buildStats(String concreteAnalyticsId, + List progress, + MemoryUsage memoryUsage) { ClusterState clusterState = clusterService.state(); PersistentTasksCustomMetaData tasks = clusterState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); PersistentTasksCustomMetaData.PersistentTask analyticsTask = MlTasks.getDataFrameAnalyticsTask(concreteAnalyticsId, tasks); @@ -242,6 +272,19 @@ private GetDataFrameAnalyticsStatsAction.Response.Stats buildStats(String concre assignmentExplanation = analyticsTask.getAssignment().getExplanation(); } return new GetDataFrameAnalyticsStatsAction.Response.Stats( - concreteAnalyticsId, analyticsState, failureReason, progress, node, assignmentExplanation); + concreteAnalyticsId, + analyticsState, + failureReason, + progress, + memoryUsage, + node, + assignmentExplanation + ); + } + + private static class RetrievedStatsHolder { + + private volatile StoredProgress progress = new StoredProgress(new ProgressTracker().report()); + private volatile MemoryUsage memoryUsage; } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java index 8757ec7c4942f..46e3dcaf74561 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java @@ -30,11 +30,13 @@ import org.elasticsearch.script.Script; import org.elasticsearch.tasks.Task; import org.elasticsearch.xpack.core.ClientHelper; +import org.elasticsearch.xpack.core.ml.MlStatsIndex; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsState; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsTaskState; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; +import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.ml.dataframe.extractor.DataFrameDataExtractorFactory; import org.elasticsearch.xpack.ml.dataframe.persistence.DataFrameAnalyticsConfigProvider; @@ -102,15 +104,35 @@ public void execute(DataFrameAnalyticsTask task, DataFrameAnalyticsState current ); // Retrieve configuration - ActionListener stateAliasListener = ActionListener.wrap( + ActionListener 
statsIndexListener = ActionListener.wrap( aBoolean -> configProvider.get(task.getParams().getId(), configListener), configListener::onFailure ); + // Make sure the stats index and alias exist + ActionListener stateAliasListener = ActionListener.wrap( + aBoolean -> createStatsIndexAndUpdateMappingsIfNecessary(clusterState, statsIndexListener), + configListener::onFailure + ); + // Make sure the state index and alias exist AnomalyDetectorsIndex.createStateIndexAndAliasIfNecessary(client, clusterState, expressionResolver, stateAliasListener); } + private void createStatsIndexAndUpdateMappingsIfNecessary(ClusterState clusterState, ActionListener listener) { + ActionListener createIndexListener = ActionListener.wrap( + aBoolean -> ElasticsearchMappings.addDocMappingIfMissing( + MlStatsIndex.writeAlias(), + MlStatsIndex::mapping, + client, + clusterState, + listener) + , listener::onFailure + ); + + MlStatsIndex.createStatsIndexAndAliasIfNecessary(client, clusterState, expressionResolver, createIndexListener); + } + private void executeStartingJob(DataFrameAnalyticsTask task, DataFrameAnalyticsConfig config) { DataFrameAnalyticsTaskState reindexingState = new DataFrameAnalyticsTaskState(DataFrameAnalyticsState.REINDEXING, task.getAllocationId(), null); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessManager.java index da65cd768e556..ee38f2a63d0f4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessManager.java @@ -426,7 +426,8 @@ private AnalyticsResultProcessor createResultProcessor(DataFrameAnalyticsTask ta DataFrameRowsJoiner dataFrameRowsJoiner = new DataFrameRowsJoiner(config.getId(), dataExtractorFactory.newExtractor(true), resultsPersisterService); return new AnalyticsResultProcessor( - config, dataFrameRowsJoiner, task.getStatsHolder(), trainedModelProvider, auditor, dataExtractor.get().getFieldNames()); + config, dataFrameRowsJoiner, task.getStatsHolder(), trainedModelProvider, auditor, resultsPersisterService, + dataExtractor.get().getFieldNames()); } } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsResultProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsResultProcessor.java index 636502dbe0058..cd990ca11ab33 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsResultProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsResultProcessor.java @@ -11,24 +11,32 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; +import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.license.License; +import org.elasticsearch.xpack.core.ml.MlStatsIndex; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; import org.elasticsearch.xpack.core.ml.dataframe.analyses.Classification; import 
org.elasticsearch.xpack.core.ml.dataframe.analyses.Regression; +import org.elasticsearch.xpack.core.ml.dataframe.stats.MemoryUsage; import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; import org.elasticsearch.xpack.core.ml.inference.TrainedModelDefinition; import org.elasticsearch.xpack.core.ml.inference.TrainedModelInput; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; import org.elasticsearch.xpack.core.security.user.XPackUser; import org.elasticsearch.xpack.ml.dataframe.process.results.AnalyticsResult; import org.elasticsearch.xpack.ml.dataframe.process.results.RowResults; import org.elasticsearch.xpack.ml.dataframe.stats.StatsHolder; import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelProvider; import org.elasticsearch.xpack.ml.notifications.DataFrameAnalyticsAuditor; +import org.elasticsearch.xpack.ml.utils.persistence.ResultsPersisterService; +import java.io.IOException; import java.time.Instant; import java.util.Collections; import java.util.Iterator; @@ -36,6 +44,7 @@ import java.util.Objects; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.function.Function; import static java.util.stream.Collectors.toList; @@ -60,6 +69,7 @@ public class AnalyticsResultProcessor { private final StatsHolder statsHolder; private final TrainedModelProvider trainedModelProvider; private final DataFrameAnalyticsAuditor auditor; + private final ResultsPersisterService resultsPersisterService; private final List fieldNames; private final CountDownLatch completionLatch = new CountDownLatch(1); private volatile String failure; @@ -67,12 +77,14 @@ public class AnalyticsResultProcessor { public AnalyticsResultProcessor(DataFrameAnalyticsConfig analytics, DataFrameRowsJoiner dataFrameRowsJoiner, StatsHolder statsHolder, TrainedModelProvider trainedModelProvider, - DataFrameAnalyticsAuditor auditor, List fieldNames) { + DataFrameAnalyticsAuditor auditor, ResultsPersisterService resultsPersisterService, + List fieldNames) { this.analytics = Objects.requireNonNull(analytics); this.dataFrameRowsJoiner = Objects.requireNonNull(dataFrameRowsJoiner); this.statsHolder = Objects.requireNonNull(statsHolder); this.trainedModelProvider = Objects.requireNonNull(trainedModelProvider); this.auditor = Objects.requireNonNull(auditor); + this.resultsPersisterService = Objects.requireNonNull(resultsPersisterService); this.fieldNames = Collections.unmodifiableList(Objects.requireNonNull(fieldNames)); } @@ -148,6 +160,11 @@ private void processResult(AnalyticsResult result, DataFrameRowsJoiner resultsJo if (inferenceModelBuilder != null) { createAndIndexInferenceModel(inferenceModelBuilder); } + MemoryUsage memoryUsage = result.getMemoryUsage(); + if (memoryUsage != null) { + statsHolder.setMemoryUsage(memoryUsage); + indexStatsResult(memoryUsage, memoryUsage::documentId); + } } private void createAndIndexInferenceModel(TrainedModelDefinition.Builder inferenceModel) { @@ -224,4 +241,23 @@ private void setAndReportFailure(Exception e) { failure = "error processing results; " + e.getMessage(); auditor.error(analytics.getId(), "Error processing results; " + e.getMessage()); } + + private void indexStatsResult(ToXContentObject result, Function docIdSupplier) { + try { + resultsPersisterService.indexWithRetry(analytics.getId(), + MlStatsIndex.writeAlias(), + result, + new ToXContent.MapParams(Collections.singletonMap(ToXContentParams.FOR_INTERNAL_STORAGE, "true")), + 
WriteRequest.RefreshPolicy.IMMEDIATE, + docIdSupplier.apply(analytics.getId()), + () -> true, + errorMsg -> auditor.error(analytics.getId(), + "failed to persist result with id [" + docIdSupplier.apply(analytics.getId()) + "]; " + errorMsg) + ); + } catch (IOException ioe) { + LOGGER.error(() -> new ParameterizedMessage("[{}] Failed indexing stats result", analytics.getId()), ioe); + } catch (Exception e) { + LOGGER.error(() -> new ParameterizedMessage("[{}] Failed indexing stats result", analytics.getId()), e); + } + } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/results/AnalyticsResult.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/results/AnalyticsResult.java index b1c7bf6599a75..fcac851fa13ef 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/results/AnalyticsResult.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/results/AnalyticsResult.java @@ -5,11 +5,13 @@ */ package org.elasticsearch.xpack.ml.dataframe.process.results; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ml.dataframe.stats.MemoryUsage; import org.elasticsearch.xpack.core.ml.inference.TrainedModelDefinition; import java.io.IOException; @@ -23,29 +25,36 @@ public class AnalyticsResult implements ToXContentObject { public static final ParseField TYPE = new ParseField("analytics_result"); - public static final ParseField PROGRESS_PERCENT = new ParseField("progress_percent"); - public static final ParseField INFERENCE_MODEL = new ParseField("inference_model"); + private static final ParseField PROGRESS_PERCENT = new ParseField("progress_percent"); + private static final ParseField INFERENCE_MODEL = new ParseField("inference_model"); + private static final ParseField ANALYTICS_MEMORY_USAGE = new ParseField("analytics_memory_usage"); public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(TYPE.getPreferredName(), - a -> new AnalyticsResult((RowResults) a[0], (Integer) a[1], (TrainedModelDefinition.Builder) a[2])); + a -> new AnalyticsResult((RowResults) a[0], (Integer) a[1], (TrainedModelDefinition.Builder) a[2], (MemoryUsage) a[3])); static { PARSER.declareObject(optionalConstructorArg(), RowResults.PARSER, RowResults.TYPE); PARSER.declareInt(optionalConstructorArg(), PROGRESS_PERCENT); // TODO change back to STRICT_PARSER once native side is aligned PARSER.declareObject(optionalConstructorArg(), TrainedModelDefinition.LENIENT_PARSER, INFERENCE_MODEL); + PARSER.declareObject(optionalConstructorArg(), MemoryUsage.STRICT_PARSER, ANALYTICS_MEMORY_USAGE); } private final RowResults rowResults; private final Integer progressPercent; private final TrainedModelDefinition.Builder inferenceModelBuilder; private final TrainedModelDefinition inferenceModel; + private final MemoryUsage memoryUsage; - public AnalyticsResult(RowResults rowResults, Integer progressPercent, TrainedModelDefinition.Builder inferenceModelBuilder) { + public AnalyticsResult(@Nullable RowResults rowResults, + @Nullable Integer progressPercent, + @Nullable TrainedModelDefinition.Builder inferenceModelBuilder, + @Nullable MemoryUsage memoryUsage) { this.rowResults = 
rowResults; this.progressPercent = progressPercent; this.inferenceModelBuilder = inferenceModelBuilder; this.inferenceModel = inferenceModelBuilder == null ? null : inferenceModelBuilder.build(); + this.memoryUsage = memoryUsage; } public RowResults getRowResults() { @@ -60,6 +69,10 @@ public TrainedModelDefinition.Builder getInferenceModelBuilder() { return inferenceModelBuilder; } + public MemoryUsage getMemoryUsage() { + return memoryUsage; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -74,6 +87,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws inferenceModel, new ToXContent.MapParams(Collections.singletonMap(FOR_INTERNAL_STORAGE, "true"))); } + if (memoryUsage != null) { + builder.field(ANALYTICS_MEMORY_USAGE.getPreferredName(), memoryUsage, params); + } builder.endObject(); return builder; } @@ -90,11 +106,12 @@ public boolean equals(Object other) { AnalyticsResult that = (AnalyticsResult) other; return Objects.equals(rowResults, that.rowResults) && Objects.equals(progressPercent, that.progressPercent) - && Objects.equals(inferenceModel, that.inferenceModel); + && Objects.equals(inferenceModel, that.inferenceModel) + && Objects.equals(memoryUsage, that.memoryUsage); } @Override public int hashCode() { - return Objects.hash(rowResults, progressPercent, inferenceModel); + return Objects.hash(rowResults, progressPercent, inferenceModel, memoryUsage); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/stats/StatsHolder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/stats/StatsHolder.java index ac0396b3e81ca..d2e9bdd957ec6 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/stats/StatsHolder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/stats/StatsHolder.java @@ -5,15 +5,33 @@ */ package org.elasticsearch.xpack.ml.dataframe.stats; +import org.elasticsearch.xpack.core.ml.dataframe.stats.MemoryUsage; + +import java.util.concurrent.atomic.AtomicReference; + /** * Holds data frame analytics stats in memory so that they may be retrieved * from the get stats api for started jobs efficiently. 
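+ * Progress is kept in a {@link ProgressTracker} and the latest memory usage in
+ * an {@link AtomicReference}, so the results processor can update the stats
+ * while get-stats requests read them from other threads.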
*/ public class StatsHolder { - private final ProgressTracker progressTracker = new ProgressTracker(); + private final ProgressTracker progressTracker; + private final AtomicReference memoryUsageHolder; + + public StatsHolder() { + progressTracker = new ProgressTracker(); + memoryUsageHolder = new AtomicReference<>(); + } public ProgressTracker getProgressTracker() { return progressTracker; } + + public void setMemoryUsage(MemoryUsage memoryUsage) { + memoryUsageHolder.set(memoryUsage); + } + + public MemoryUsage getMemoryUsage() { + return memoryUsageHolder.get(); + } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java index d5d85d3fa6088..48e8fc9ecdaa2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java @@ -112,6 +112,7 @@ import org.elasticsearch.xpack.ml.job.persistence.InfluencersQueryBuilder.InfluencersQuery; import org.elasticsearch.xpack.ml.job.process.autodetect.params.AutodetectParams; import org.elasticsearch.xpack.ml.utils.MlIndicesUtils; +import org.elasticsearch.xpack.ml.utils.persistence.MlParserUtils; import java.io.IOException; import java.io.InputStream; @@ -507,7 +508,7 @@ public void datafeedTimingStats(List jobIds, ActionListener T parseSearchHit(SearchHit hit, BiFunction objectParser) { - BytesReference source = hit.getSourceRef(); - try (InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { - return objectParser.apply(parser, null); - } catch (IOException e) { - throw new ElasticsearchParseException("failed to parse " + hit.getId(), e); - } - } - /** * Search for buckets with the parameters in the {@link BucketsQueryBuilder} * Uses the internal client, so runs as the _xpack user @@ -1119,7 +1103,7 @@ private void searchSingleResult(String jobId, String resultDescription, S handler.accept(new Result<>(null, notFoundSupplier.get())); } else if (hits.length == 1) { try { - T result = parseSearchHit(hits[0], objectParser); + T result = MlParserUtils.parse(hits[0], objectParser); handler.accept(new Result<>(hits[0].getIndex(), result)); } catch (Exception e) { errorHandler.accept(e); @@ -1263,7 +1247,7 @@ public void scheduledEvents(ScheduledEventsQueryBuilder query, ActionListener(calendars, response.getHits().getTotalHits().value, Calendar.RESULTS_FIELD)); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/cat/RestCatDatafeedsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/cat/RestCatDatafeedsAction.java index f0bbc3f872e1d..e29098cb49031 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/cat/RestCatDatafeedsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/cat/RestCatDatafeedsAction.java @@ -78,9 +78,9 @@ protected Table getTableWithHeader(RestRequest request) { .build()); // Timing stats - table.addCell("bucket.count", + table.addCell("buckets.count", TableColumnAttributeBuilder.builder("bucket count") - .setAliases("bc", "bucketCount") + .setAliases("bc", "bucketsCount") .build()); table.addCell("search.count", TableColumnAttributeBuilder.builder("number of searches ran by the datafeed") diff 
--git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/cat/RestCatJobsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/cat/RestCatJobsAction.java index dce05eeaa3024..9d67a93af5ce8 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/cat/RestCatJobsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/cat/RestCatJobsAction.java @@ -93,7 +93,7 @@ protected Table getTableWithHeader(RestRequest request) { .build()); table.addCell("data.processed_fields", TableColumnAttributeBuilder.builder("number of processed fields", false) - .setAliases("dpr", "dataProcessedFields") + .setAliases("dpf", "dataProcessedFields") .build()); table.addCell("data.input_bytes", TableColumnAttributeBuilder.builder("total input bytes", false) @@ -219,55 +219,55 @@ protected Table getTableWithHeader(RestRequest request) { .build()); // Forecast Stats - table.addCell("forecast." + ForecastStats.Fields.TOTAL, - TableColumnAttributeBuilder.builder("total number of forecasts").setAliases("ft", "forecastTotal").build()); - table.addCell("forecast.memory.min", + table.addCell("forecasts." + ForecastStats.Fields.TOTAL, + TableColumnAttributeBuilder.builder("total number of forecasts").setAliases("ft", "forecastsTotal").build()); + table.addCell("forecasts.memory.min", TableColumnAttributeBuilder.builder("minimum memory used by forecasts", false) - .setAliases("fmmin", "forecastMemoryMin") + .setAliases("fmmin", "forecastsMemoryMin") .build()); - table.addCell("forecast.memory.max", + table.addCell("forecasts.memory.max", TableColumnAttributeBuilder.builder("maximum memory used by forecasts", false) .setAliases("fmmax", "forecastsMemoryMax") .build()); - table.addCell("forecast.memory.avg", + table.addCell("forecasts.memory.avg", TableColumnAttributeBuilder.builder("average memory used by forecasts", false) - .setAliases("fmavg", "forecastMemoryAvg") + .setAliases("fmavg", "forecastsMemoryAvg") .build()); - table.addCell("forecast.memory.total", + table.addCell("forecasts.memory.total", TableColumnAttributeBuilder.builder("total memory used by all forecasts", false) - .setAliases("fmt", "forecastMemoryTotal") + .setAliases("fmt", "forecastsMemoryTotal") .build()); - table.addCell("forecast." + ForecastStats.Fields.RECORDS + ".min", + table.addCell("forecasts." + ForecastStats.Fields.RECORDS + ".min", TableColumnAttributeBuilder.builder("minimum record count for forecasts", false) - .setAliases("frmin", "forecastRecordsMin") + .setAliases("frmin", "forecastsRecordsMin") .build()); - table.addCell("forecast." + ForecastStats.Fields.RECORDS + ".max", + table.addCell("forecasts." + ForecastStats.Fields.RECORDS + ".max", TableColumnAttributeBuilder.builder("maximum record count for forecasts", false) - .setAliases("frmax", "forecastRecordsMax") + .setAliases("frmax", "forecastsRecordsMax") .build()); - table.addCell("forecast." + ForecastStats.Fields.RECORDS + ".avg", + table.addCell("forecasts." + ForecastStats.Fields.RECORDS + ".avg", TableColumnAttributeBuilder.builder("average record count for forecasts", false) - .setAliases("fravg", "forecastRecordsAvg") + .setAliases("fravg", "forecastsRecordsAvg") .build()); - table.addCell("forecast." + ForecastStats.Fields.RECORDS + ".total", + table.addCell("forecasts." 
+ ForecastStats.Fields.RECORDS + ".total", TableColumnAttributeBuilder.builder("total record count for all forecasts", false) - .setAliases("frt", "forecastRecordsTotal") + .setAliases("frt", "forecastsRecordsTotal") .build()); - table.addCell("forecast.time.min", + table.addCell("forecasts.time.min", TableColumnAttributeBuilder.builder("minimum runtime for forecasts", false) - .setAliases("ftmin", "forecastTimeMin") + .setAliases("ftmin", "forecastsTimeMin") .build()); - table.addCell("forecast.time.max", + table.addCell("forecasts.time.max", TableColumnAttributeBuilder.builder("maximum run time for forecasts", false) - .setAliases("ftmax", "forecastTimeMax") + .setAliases("ftmax", "forecastsTimeMax") .build()); - table.addCell("forecast.time.avg", + table.addCell("forecasts.time.avg", TableColumnAttributeBuilder.builder("average runtime for all forecasts (milliseconds)", false) - .setAliases("ftavg", "forecastTimeAvg") + .setAliases("ftavg", "forecastsTimeAvg") .build()); - table.addCell("forecast.time.total", + table.addCell("forecasts.time.total", TableColumnAttributeBuilder.builder("total runtime for all forecasts", false) - .setAliases("ftt", "forecastTimeTotal").build()); + .setAliases("ftt", "forecastsTimeTotal").build()); //Node info table.addCell("node.id", @@ -288,29 +288,29 @@ protected Table getTableWithHeader(RestRequest request) { .build()); //Timing Stats - table.addCell("bucket.count", + table.addCell("buckets.count", TableColumnAttributeBuilder.builder("bucket count") - .setAliases("bc", "bucketCount") + .setAliases("bc", "bucketsCount") .build()); - table.addCell("bucket.time.total", + table.addCell("buckets.time.total", TableColumnAttributeBuilder.builder("total bucket processing time", false) - .setAliases("btt", "bucketTimeTotal") + .setAliases("btt", "bucketsTimeTotal") .build()); - table.addCell("bucket.time.min", + table.addCell("buckets.time.min", TableColumnAttributeBuilder.builder("minimum bucket processing time", false) - .setAliases("btmin", "bucketTimeMin") + .setAliases("btmin", "bucketsTimeMin") .build()); - table.addCell("bucket.time.max", + table.addCell("buckets.time.max", TableColumnAttributeBuilder.builder("maximum bucket processing time", false) - .setAliases("btmax", "bucketTimeMax") + .setAliases("btmax", "bucketsTimeMax") .build()); - table.addCell("bucket.time.exp_avg", + table.addCell("buckets.time.exp_avg", TableColumnAttributeBuilder.builder("exponential average bucket processing time (milliseconds)", false) - .setAliases("btea", "bucketTimeExpAvg") + .setAliases("btea", "bucketsTimeExpAvg") .build()); - table.addCell("bucket.time.exp_avg_hour", + table.addCell("buckets.time.exp_avg_hour", TableColumnAttributeBuilder.builder("exponential average bucket processing time by hour (milliseconds)", false) - .setAliases("bteah", "bucketTimeExpAvgHour") + .setAliases("bteah", "bucketsTimeExpAvgHour") .build()); table.endHeaders(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/MlParserUtils.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/MlParserUtils.java new file mode 100644 index 0000000000000..5585972ad8404 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/MlParserUtils.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.ml.utils.persistence;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.search.SearchHit;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.function.BiFunction;
+
+public final class MlParserUtils {
+
+    private MlParserUtils() {}
+
+    /**
+     * @param hit The search hit to parse
+     * @param objectParser Parser for the object of type T
+     * @return The parsed value of T from the search hit
+     * @throws ElasticsearchException on failure
+     */
+    public static <T> T parse(SearchHit hit, BiFunction<XContentParser, Void, T> objectParser) {
+        BytesReference source = hit.getSourceRef();
+        try (InputStream stream = source.streamInput();
+             XContentParser parser = XContentFactory.xContent(XContentType.JSON)
+                 .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) {
+            return objectParser.apply(parser, null);
+        } catch (IOException e) {
+            throw new ElasticsearchParseException("failed to parse " + hit.getId(), e);
+        }
+    }
+}
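A short usage sketch of the extracted helper; the wrapper method and variable names are hypothetical, while MlParserUtils.parse, StoredProgress.PARSER and MemoryUsage.LENIENT_PARSER are the real ingredients seen at the call sites above:

// Hypothetical caller, for illustration only. Any ConstructingObjectParser<T, Void>
// satisfies the BiFunction<XContentParser, Void, T> parameter, which is how the
// call sites above pass StoredProgress.PARSER and MemoryUsage.LENIENT_PARSER.
private static StoredProgress firstStoredProgress(SearchResponse searchResponse) {
    SearchHit hit = searchResponse.getHits().getAt(0); // assumes at least one hit
    return MlParserUtils.parse(hit, StoredProgress.PARSER);
}

diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessManagerTests.java
index 84a75506ab4a1..329e7c552d453 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessManagerTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessManagerTests.java
@@ -53,7 +53,7 @@ public class AnalyticsProcessManagerTests extends ESTestCase {
     private static final String CONFIG_ID = "config-id";
     private static final int NUM_ROWS = 100;
     private static final int NUM_COLS = 4;
-    private static final AnalyticsResult PROCESS_RESULT = new AnalyticsResult(null, null, null);
+    private static final AnalyticsResult PROCESS_RESULT = new AnalyticsResult(null, null, null, null);
 
     private Client client;
     private DataFrameAnalyticsAuditor auditor;
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsResultProcessorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsResultProcessorTests.java
index a93e3f4b0f126..044a06efbb2ce 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsResultProcessorTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsResultProcessorTests.java
@@ -27,6 +27,7 @@ import org.elasticsearch.xpack.ml.extractor.ExtractedFields;
 import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelProvider;
 import org.elasticsearch.xpack.ml.notifications.DataFrameAnalyticsAuditor;
+import org.elasticsearch.xpack.ml.utils.persistence.ResultsPersisterService;
 import org.junit.Before;
 import org.mockito.ArgumentCaptor;
 import org.mockito.InOrder;
@@ -61,6 +62,7 @@ public class AnalyticsResultProcessorTests extends ESTestCase {
     private StatsHolder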
statsHolder = new StatsHolder(); private TrainedModelProvider trainedModelProvider; private DataFrameAnalyticsAuditor auditor; + private ResultsPersisterService resultsPersisterService; private DataFrameAnalyticsConfig analyticsConfig; @Before @@ -70,6 +72,7 @@ public void setUpMocks() { dataFrameRowsJoiner = mock(DataFrameRowsJoiner.class); trainedModelProvider = mock(TrainedModelProvider.class); auditor = mock(DataFrameAnalyticsAuditor.class); + resultsPersisterService = mock(ResultsPersisterService.class); analyticsConfig = new DataFrameAnalyticsConfig.Builder() .setId(JOB_ID) .setDescription(JOB_DESCRIPTION) @@ -93,7 +96,7 @@ public void testProcess_GivenNoResults() { public void testProcess_GivenEmptyResults() { givenDataFrameRows(2); - givenProcessResults(Arrays.asList(new AnalyticsResult(null, 50, null), new AnalyticsResult(null, 100, null))); + givenProcessResults(Arrays.asList(new AnalyticsResult(null, 50, null, null), new AnalyticsResult(null, 100, null, null))); AnalyticsResultProcessor resultProcessor = createResultProcessor(); resultProcessor.process(process); @@ -108,7 +111,8 @@ public void testProcess_GivenRowResults() { givenDataFrameRows(2); RowResults rowResults1 = mock(RowResults.class); RowResults rowResults2 = mock(RowResults.class); - givenProcessResults(Arrays.asList(new AnalyticsResult(rowResults1, 50, null), new AnalyticsResult(rowResults2, 100, null))); + givenProcessResults(Arrays.asList(new AnalyticsResult(rowResults1, 50, null, null), + new AnalyticsResult(rowResults2, 100, null, null))); AnalyticsResultProcessor resultProcessor = createResultProcessor(); resultProcessor.process(process); @@ -125,7 +129,8 @@ public void testProcess_GivenDataFrameRowsJoinerFails() { givenDataFrameRows(2); RowResults rowResults1 = mock(RowResults.class); RowResults rowResults2 = mock(RowResults.class); - givenProcessResults(Arrays.asList(new AnalyticsResult(rowResults1, 50, null), new AnalyticsResult(rowResults2, 100, null))); + givenProcessResults(Arrays.asList(new AnalyticsResult(rowResults1, 50, null, null), + new AnalyticsResult(rowResults2, 100, null, null))); doThrow(new RuntimeException("some failure")).when(dataFrameRowsJoiner).processRowResults(any(RowResults.class)); @@ -155,7 +160,7 @@ public void testProcess_GivenInferenceModelIsStoredSuccessfully() { List expectedFieldNames = Arrays.asList("foo", "bar", "baz"); TrainedModelDefinition.Builder inferenceModel = TrainedModelDefinitionTests.createRandomBuilder(); - givenProcessResults(Arrays.asList(new AnalyticsResult(null, null, inferenceModel))); + givenProcessResults(Arrays.asList(new AnalyticsResult(null, null, inferenceModel, null))); AnalyticsResultProcessor resultProcessor = createResultProcessor(expectedFieldNames); resultProcessor.process(process); @@ -199,7 +204,7 @@ public void testProcess_GivenInferenceModelFailedToStore() { }).when(trainedModelProvider).storeTrainedModel(any(TrainedModelConfig.class), any(ActionListener.class)); TrainedModelDefinition.Builder inferenceModel = TrainedModelDefinitionTests.createRandomBuilder(); - givenProcessResults(Arrays.asList(new AnalyticsResult(null, null, inferenceModel))); + givenProcessResults(Arrays.asList(new AnalyticsResult(null, null, inferenceModel, null))); AnalyticsResultProcessor resultProcessor = createResultProcessor(); resultProcessor.process(process); @@ -232,6 +237,6 @@ private AnalyticsResultProcessor createResultProcessor() { private AnalyticsResultProcessor createResultProcessor(List fieldNames) { return new AnalyticsResultProcessor( - analyticsConfig, 
dataFrameRowsJoiner, statsHolder, trainedModelProvider, auditor, fieldNames); + analyticsConfig, dataFrameRowsJoiner, statsHolder, trainedModelProvider, auditor, resultsPersisterService, fieldNames); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/results/AnalyticsResultTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/results/AnalyticsResultTests.java index 1758659bf3c20..3b949d84ed0ef 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/results/AnalyticsResultTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/results/AnalyticsResultTests.java @@ -7,12 +7,15 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.xpack.core.ml.dataframe.stats.MemoryUsageTests; import org.elasticsearch.xpack.core.ml.inference.MlInferenceNamedXContentProvider; import org.elasticsearch.xpack.core.ml.inference.TrainedModelDefinition; import org.elasticsearch.xpack.core.ml.inference.TrainedModelDefinitionTests; +import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; import java.util.ArrayList; import java.util.Collections; @@ -42,7 +45,7 @@ protected AnalyticsResult createTestInstance() { if (randomBoolean()) { inferenceModel = TrainedModelDefinitionTests.createRandomBuilder(); } - return new AnalyticsResult(rowResults, progressPercent, inferenceModel); + return new AnalyticsResult(rowResults, progressPercent, inferenceModel, MemoryUsageTests.createRandom()); } @Override @@ -50,6 +53,11 @@ protected AnalyticsResult doParseInstance(XContentParser parser) { return AnalyticsResult.PARSER.apply(parser, null); } + @Override + protected ToXContent.Params getToXContentParams() { + return new ToXContent.MapParams(Collections.singletonMap(ToXContentParams.FOR_INTERNAL_STORAGE, "true")); + } + @Override protected boolean supportsUnknownFields() { return false; diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java index b5f3e38f23688..5d003dd80bf08 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java @@ -289,7 +289,7 @@ public void resolveAsMergedMapping(String indexWildcard, String javaRegex, boole public static IndexResolution mergedMappings(DataTypeRegistry typeRegistry, String indexPattern, String[] indexNames, Map> fieldCaps) { - if (fieldCaps == null || fieldCaps.isEmpty()) { + if (indexNames.length == 0) { return IndexResolution.notFound(indexPattern); } diff --git a/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/tree/NodeSubclassTests.java b/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/tree/NodeSubclassTests.java index 3101199425821..74574974bab90 100644 --- a/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/tree/NodeSubclassTests.java +++ b/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/tree/NodeSubclassTests.java @@ -24,6 +24,8 @@ import org.elasticsearch.xpack.ql.expression.gen.processor.ConstantProcessor; import 
org.elasticsearch.xpack.ql.expression.gen.processor.Processor;
 import org.elasticsearch.xpack.ql.expression.predicate.fulltext.FullTextPredicate;
+import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.In;
+import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.InPipe;
 import org.elasticsearch.xpack.ql.expression.predicate.regex.Like;
 import org.elasticsearch.xpack.ql.expression.predicate.regex.LikePattern;
 import org.elasticsearch.xpack.ql.tree.NodeTests.ChildrenAreAProperty;
@@ -56,6 +58,7 @@ import java.util.jar.JarEntry;
 import java.util.jar.JarInputStream;
 
+import static java.util.Arrays.asList;
 import static java.util.Collections.emptyList;
 import static java.util.stream.Collectors.toList;
 import static org.mockito.Mockito.mock;
@@ -85,6 +88,8 @@
  *
  */
 public class NodeSubclassTests<T extends B, B extends Node<B>> extends ESTestCase {
+
+    private static final List<Class<? extends Node<?>>> CLASSES_WITH_MIN_TWO_CHILDREN = asList(In.class, InPipe.class);
 
     private final Class<T> subclass;
@@ -563,7 +568,7 @@ private int randomSizeForCollection(Class<? extends Node<?>> toBuildClass) {
     }
 
     protected boolean hasAtLeastTwoChildren(Class<? extends Node<?>> toBuildClass) {
-        return false;
+        return CLASSES_WITH_MIN_TWO_CHILDREN.stream().anyMatch(toBuildClass::equals);
     }
 
     private List<?> makeListOfSameSizeOtherThan(Type listType, List<?> original) throws Exception {
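The override above plugs In and InPipe into the existing escape hatch for node classes whose randomly generated child lists must keep at least two entries. One plausible reading of why the mutation-based subclass tests need that floor (an inference from how hasAtLeastTwoChildren is used, not something the change states) is sketched below in plain Java rather than the QL classes:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

final class MutateChildrenSketch {
    // The subclass tests copy a node with one child swapped out and assert the
    // copy differs from the original; keeping list-valued children at size two
    // or more keeps the generated lists valid inputs for constructors such as
    // In's value list while still leaving something to mutate.
    static List<String> withFirstChildSwapped(List<String> children) {
        List<String> copy = new ArrayList<>(children);
        copy.set(0, copy.get(0) + "-mutated");
        return copy;
    }

    public static void main(String[] args) {
        List<String> children = Arrays.asList("a", "b");
        System.out.println(withFirstChildSwapped(children).equals(children)); // false
    }
}

diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/ErrorsTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/ErrorsTestCase.java
index c5ae7f63ad06d..68ae50a429927 100644
--- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/ErrorsTestCase.java
+++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/ErrorsTestCase.java
@@ -13,7 +13,9 @@ public interface ErrorsTestCase {
     void testSelectInvalidSql() throws Exception;
     void testSelectFromMissingIndex() throws Exception;
-    void testSelectFromIndexWithoutTypes() throws Exception;
+    void testSelectColumnFromMissingIndex() throws Exception;
+    void testSelectFromEmptyIndex() throws Exception;
+    void testSelectColumnFromEmptyIndex() throws Exception;
     void testSelectMissingField() throws Exception;
     void testSelectMissingFunction() throws Exception;
     void testSelectProjectScoreInAggContext() throws Exception;
diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/ErrorsTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/ErrorsTestCase.java
index 2bef0cd60ae16..6e2ebc87dbe94 100644
--- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/ErrorsTestCase.java
+++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/ErrorsTestCase.java
@@ -37,15 +37,30 @@ public void testSelectFromMissingIndex() throws IOException {
     }
 
     @Override
-    public void testSelectFromIndexWithoutTypes() throws Exception {
+    public void testSelectColumnFromMissingIndex() throws Exception {
+        assertFoundOneProblem(command("SELECT abc FROM test"));
+        assertEquals("line 1:17: Unknown index [test]" + END, readLine());
+    }
+
+    @Override
+    public void testSelectFromEmptyIndex() throws Exception {
         // Create an index without any types
         Request request = new Request("PUT", "/test");
         request.setJsonEntity("{}");
         client().performRequest(request);
 
         assertFoundOneProblem(command("SELECT * FROM test"));
-        //assertEquals("line 1:15: [test] doesn't have any types so it is incompatible with sql" + END, readLine());
-        assertEquals("line 1:15: Unknown index [test]" + END, readLine());
+        assertEquals("line 1:8: Cannot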
determine columns for [*]" + END, readLine()); + } + + @Override + public void testSelectColumnFromEmptyIndex() throws Exception { + Request request = new Request("PUT", "/test"); + request.setJsonEntity("{}"); + client().performRequest(request); + + assertFoundOneProblem(command("SELECT abc FROM test")); + assertEquals("line 1:8: Unknown column [abc]" + END, readLine()); } @Override diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ErrorsTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ErrorsTestCase.java index 87de389e9be01..9f7eca0c90aeb 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ErrorsTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ErrorsTestCase.java @@ -33,7 +33,15 @@ public void testSelectFromMissingIndex() throws SQLException { } @Override - public void testSelectFromIndexWithoutTypes() throws Exception { + public void testSelectColumnFromMissingIndex() throws Exception { + try (Connection c = esJdbc()) { + SQLException e = expectThrows(SQLException.class, () -> c.prepareStatement("SELECT abc FROM test").executeQuery()); + assertEquals("Found 1 problem\nline 1:17: Unknown index [test]", e.getMessage()); + } + } + + @Override + public void testSelectFromEmptyIndex() throws Exception { // Create an index without any types Request request = new Request("PUT", "/test"); request.setJsonEntity("{}"); @@ -41,9 +49,19 @@ public void testSelectFromIndexWithoutTypes() throws Exception { try (Connection c = esJdbc()) { SQLException e = expectThrows(SQLException.class, () -> c.prepareStatement("SELECT * FROM test").executeQuery()); - // see https://github.com/elastic/elasticsearch/issues/34719 - //assertEquals("Found 1 problem\nline 1:15: [test] doesn't have any types so it is incompatible with sql", e.getMessage()); - assertEquals("Found 1 problem\nline 1:15: Unknown index [test]", e.getMessage()); + assertEquals("Found 1 problem\nline 1:8: Cannot determine columns for [*]", e.getMessage()); + } + } + + @Override + public void testSelectColumnFromEmptyIndex() throws Exception { + Request request = new Request("PUT", "/test"); + request.setJsonEntity("{}"); + client().performRequest(request); + + try (Connection c = esJdbc()) { + SQLException e = expectThrows(SQLException.class, () -> c.prepareStatement("SELECT abc FROM test").executeQuery()); + assertEquals("Found 1 problem\nline 1:8: Unknown column [abc]", e.getMessage()); } } diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java index 79cf33a087ef5..946a1a9a8cac6 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java @@ -317,16 +317,28 @@ public void testSelectFromMissingIndex() { } @Override - public void testSelectFromIndexWithoutTypes() throws Exception { + public void testSelectColumnFromMissingIndex() throws Exception { + String mode = randomFrom("jdbc", "plain"); + expectBadRequest(() -> runSql(mode, "SELECT abc FROM missing"), containsString("1:17: Unknown index [missing]")); + } + + @Override + public void testSelectFromEmptyIndex() throws Exception { // Create an index without any types Request request = new Request("PUT", "/test"); request.setJsonEntity("{}"); 
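+        // creating "test" with no mappings at all: the query below must now fail
+        // on column resolution rather than index resolution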
client().performRequest(request); String mode = randomFrom("jdbc", "plain"); - expectBadRequest(() -> runSql(mode, "SELECT * FROM test"), - // see https://github.com/elastic/elasticsearch/issues/34719 - //containsString("1:15: [test] doesn't have any types so it is incompatible with sql")); - containsString("1:15: Unknown index [test]")); + expectBadRequest(() -> runSql(mode, "SELECT * FROM test"), containsString("1:8: Cannot determine columns for [*]")); + } + + @Override + public void testSelectColumnFromEmptyIndex() throws Exception { + Request request = new Request("PUT", "/test"); + request.setJsonEntity("{}"); + client().performRequest(request); + String mode = randomFrom("jdbc", "plain"); + expectBadRequest(() -> runSql(mode, "SELECT abc FROM test"), containsString("1:8: Unknown column [abc]")); } @Override diff --git a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.5.0-snapshot-b01d7cb.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.5.0-snapshot-b01d7cb.jar.sha1 deleted file mode 100644 index 06a1050ba8e0e..0000000000000 --- a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.5.0-snapshot-b01d7cb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9c54e267bfa2cd1ef904dc6e35b38bbedda4c4b1 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.5.0-snapshot-c4475920b08.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.5.0-snapshot-c4475920b08.jar.sha1 new file mode 100644 index 0000000000000..3fd9e819ce181 --- /dev/null +++ b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.5.0-snapshot-c4475920b08.jar.sha1 @@ -0,0 +1 @@ +ca406661129d35008411365d2b6e747dc39378af \ No newline at end of file diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/cat.ml_jobs.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/cat.ml_jobs.json index d773e6bbf5ec3..f62a46ba34133 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/cat.ml_jobs.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/cat.ml_jobs.json @@ -1,7 +1,7 @@ { "cat.ml_jobs":{ "documentation":{ - "url":"http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job-stats.html" + "url":"http://www.elastic.co/guide/en/elasticsearch/reference/current/cat-anomaly-detectors.html" }, "stability":"stable", "url":{ diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/constant_keyword/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/constant_keyword/10_basic.yml new file mode 100644 index 0000000000000..0635de2add546 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/constant_keyword/10_basic.yml @@ -0,0 +1,182 @@ +setup: + + - skip: + version: " - 7.99.99" # TODO: make it 7.6.99 after backport + reason: "constant_keyword was added in 7.7" + + - do: + indices.create: + index: test1 + body: + mappings: + properties: + foo: + type: constant_keyword + value: bar + + - do: + indices.create: + index: test2 + body: + mappings: + properties: + foo: + type: constant_keyword + value: baz + + - do: + index: + index: test1 + id: 1 + body: {} + + - do: + index: + index: test1 + id: 2 + body: { "foo": "bar" } + + - do: + index: + index: test2 + id: 1 + body: {} + + - do: + indices.refresh: {} + +--- +"Exist query": + + - do: + search: + index: test* + body: + size: 0 + query: + exists: + field: foo + + - match: { "hits.total.value": 3 } + + +--- +"Term query": + + - do: + search: + index: test* + pre_filter_shard_size: 1 + body: + size: 0 + query: + term: + foo: bar + + - match: { "hits.total.value": 
2 } + - match: { _shards.skipped : 1} + + - do: + search: + index: test* + pre_filter_shard_size: 1 + body: + size: 0 + query: + term: + foo: baz + + - match: { "hits.total.value": 1 } + - match: { _shards.skipped : 1} + +--- +"Terms query": + + - do: + search: + index: test* + pre_filter_shard_size: 1 + body: + size: 0 + query: + terms: + foo: [bar, quux] + + - match: { "hits.total.value": 2 } + - match: { _shards.skipped : 1} + +--- +"Prefix query": + + - do: + search: + index: test* + body: + size: 0 + query: + prefix: + foo: ba + + - match: { "hits.total.value": 3 } + + - do: + search: + index: test* + pre_filter_shard_size: 1 + body: + size: 0 + query: + prefix: + foo: baz + + - match: { "hits.total.value": 1 } + - match: { _shards.skipped : 1} + +--- +"Wildcard query": + + - do: + search: + index: test* + pre_filter_shard_size: 1 + body: + size: 0 + query: + wildcard: + foo: "*r*" + + - match: { "hits.total.value": 2 } + - match: { _shards.skipped : 1} + +--- +"Terms agg": + + - do: + search: + index: test* + body: + size: 0 + aggs: + foo_terms: + terms: + field: foo + + - match: { aggregations.foo_terms.buckets.0.key: "bar" } + - match: { aggregations.foo_terms.buckets.0.doc_count: 2 } + - match: { aggregations.foo_terms.buckets.1.key: "baz" } + - match: { aggregations.foo_terms.buckets.1.doc_count: 1 } + - length: { aggregations.foo_terms.buckets: 2 } + +--- +"Sort": + + - do: + search: + index: test* + body: + sort: [ { foo: asc } ] + + - match: { "hits.total.value": 3 } + - match: {hits.hits.0._index: test1 } + - match: {hits.hits.1._index: test1 } + - match: {hits.hits.2._index: test2 } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/constant_keyword/20_dynamic_mapping.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/constant_keyword/20_dynamic_mapping.yml new file mode 100644 index 0000000000000..4357b4ecf171a --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/constant_keyword/20_dynamic_mapping.yml @@ -0,0 +1,82 @@ +--- +"Dynamic mappings": + + - do: + indices.create: + index: test1 + body: + mappings: + properties: + foo: + type: constant_keyword + + - do: + index: + index: test1 + id: 1 + body: {} + + - do: + indices.get_mapping: + index: test1 + + - match: { test1.mappings.properties.foo.type: constant_keyword } + - is_false: test1.mappings.properties.foo.value + + - do: + index: + index: test1 + id: 1 + body: {} + + - do: + indices.refresh: {} + + - do: + indices.get_mapping: + index: test1 + + - match: { test1.mappings.properties.foo.type: constant_keyword } + - is_false: test1.mappings.properties.foo.value + + - do: + search: + index: test1 + body: + size: 0 + query: + term: + foo: + value: bar + + - match: { hits.total.value: 0 } + + - do: + search: + index: test1 + body: + size: 0 + aggs: + foo_terms: + terms: + field: foo + + - match: { hits.total.value: 1 } + - length: { aggregations.foo_terms.buckets: 0 } + + - do: + index: + index: test1 + id: 1 + body: + foo: bar + + - do: + indices.refresh: {} + + - do: + indices.get_mapping: + index: test1 + + - match: { test1.mappings.properties.foo.type: constant_keyword } + - match: { test1.mappings.properties.foo.value: bar } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/datafeed_cat_apis.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/datafeed_cat_apis.yml index 4437a31b5fdc1..89274c59884c2 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/datafeed_cat_apis.yml +++ 
b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/datafeed_cat_apis.yml @@ -86,7 +86,7 @@ setup: datafeed_id: datafeed-job-stats-test - match: $body: | - / #id state bucket.count search.count + / #id state buckets.count search.count ^ (datafeed\-job\-stats\-test \s+ \w+ \s+ \d+ \s+ \d+ \n)+ $/ - do: @@ -95,7 +95,7 @@ setup: datafeed_id: datafeed-job-stats-test - match: $body: | - /^ id \s+ state \s+ bucket\.count \s+ search\.count \n + /^ id \s+ state \s+ buckets\.count \s+ search\.count \n (datafeed\-job\-stats\-test \s+ \w+ \s+ \d+ \s+ \d+ \n)+ $/ - do: diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/job_cat_apis.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/job_cat_apis.yml index bb13c3a5cc540..a82ce20032094 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/job_cat_apis.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/job_cat_apis.yml @@ -90,7 +90,7 @@ setup: job_id: job-stats-test - match: $body: | - / #id state data.processed_records model.bytes model.memory_status forecast.total bucket.count + / #id state data.processed_records model.bytes model.memory_status forecasts.total buckets.count ^ (job\-stats\-test \s+ \w+ \s+ \d+ \s+ .*? \s+ \w+ \s+ \d+ \s+ \d+ \n)+ $/ - do: @@ -99,7 +99,7 @@ setup: job_id: job-stats-test - match: $body: | - /^ id \s+ state \s+ data\.processed_records \s+ model\.bytes \s+ model\.memory_status \s+ forecast\.total \s+ bucket\.count \n + /^ id \s+ state \s+ data\.processed_records \s+ model\.bytes \s+ model\.memory_status \s+ forecasts\.total \s+ buckets\.count \n (job\-stats\-test \s+ \w+ \s+ \d+ \s+ .*? \s+ \w+ \s+ \d+ \s+ \d+ \n)+ $/ - do: diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java index e787a253a5f2d..912ac73c1254d 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java @@ -14,12 +14,14 @@ import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.common.settings.SettingsModule; @@ -39,6 +41,7 @@ import org.elasticsearch.threadpool.ExecutorBuilder; import org.elasticsearch.threadpool.FixedExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.XPackSettings; @@ -137,6 +140,23 @@ public class Transform extends Plugin implements SystemIndexPlugin, PersistentTa Setting.Property.Dynamic ); + /** + * Node attributes for transform, automatically created and retrievable via cluster state. 
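+ * For example, starting a node with the setting node.transform: false makes the attribute transform.node=false visible on that node in cluster state.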
+ * These attributes should never be set directly, use the node setting counterparts instead. + */ + public static final String TRANSFORM_ENABLED_NODE_ATTR = "transform.node"; + public static final String TRANSFORM_REMOTE_ENABLED_NODE_ATTR = "transform.remote_connect"; + + /** + * Setting whether transform (the coordinator task) can run on this node and REST APIs are available, + * respects xpack.transform.enabled (for the whole plugin) as fallback + */ + public static final Setting<Boolean> TRANSFORM_ENABLED_NODE = Setting.boolSetting( + "node.transform", + settings -> Boolean.toString(XPackSettings.TRANSFORM_ENABLED.get(settings) && DiscoveryNode.isDataNode(settings)), + Property.NodeScope + ); + public Transform(Settings settings) { this.settings = settings; this.enabled = XPackSettings.TRANSFORM_ENABLED.get(settings); @@ -222,8 +242,14 @@ public List<ExecutorBuilder<?>> getExecutorBuilders(Settings settings) { return emptyList(); } - FixedExecutorBuilder indexing = new FixedExecutorBuilder(settings, TASK_THREAD_POOL_NAME, 4, 4, "transform.task_thread_pool", - false); + FixedExecutorBuilder indexing = new FixedExecutorBuilder( + settings, + TASK_THREAD_POOL_NAME, + 4, + 4, + "transform.task_thread_pool", + false + ); return Collections.singletonList(indexing); } @@ -296,13 +322,44 @@ public List<PersistentTasksExecutor<?>> getPersistentTasksExecutor( // the transform services should have been created assert transformServices.get() != null; - return Collections.singletonList(new TransformPersistentTasksExecutor(client, transformServices.get(), threadPool, clusterService, - settingsModule.getSettings(), expressionResolver)); + return Collections.singletonList( + new TransformPersistentTasksExecutor( + client, + transformServices.get(), + threadPool, + clusterService, + settingsModule.getSettings(), + expressionResolver + ) + ); } @Override public List<Setting<?>> getSettings() { - return Collections.singletonList(NUM_FAILURE_RETRIES_SETTING); + return Collections.unmodifiableList(Arrays.asList(TRANSFORM_ENABLED_NODE, NUM_FAILURE_RETRIES_SETTING)); + } + + @Override + public Settings additionalSettings() { + String transformEnabledNodeAttribute = "node.attr." + TRANSFORM_ENABLED_NODE_ATTR; + String transformRemoteEnabledNodeAttribute = "node.attr." 
+ TRANSFORM_REMOTE_ENABLED_NODE_ATTR; + + if (settings.get(transformEnabledNodeAttribute) != null || settings.get(transformRemoteEnabledNodeAttribute) != null) { + throw new IllegalArgumentException( + "Directly setting transform node attributes is not permitted, please use the documented node settings instead" + ); + } + + if (enabled == false) { + return Settings.EMPTY; + } + + Settings.Builder additionalSettings = Settings.builder(); + + additionalSettings.put(transformEnabledNodeAttribute, TRANSFORM_ENABLED_NODE.get(settings)); + additionalSettings.put(transformRemoteEnabledNodeAttribute, RemoteClusterService.ENABLE_REMOTE_CLUSTERS.get(settings)); + + return additionalSettings.build(); } @Override diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/TransformUsageTransportAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/TransformUsageTransportAction.java index 219cb9e06b326..19e26053b27b5 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/TransformUsageTransportAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/TransformUsageTransportAction.java @@ -54,19 +54,36 @@ public class TransformUsageTransportAction extends XPackUsageFeatureTransportAct private final Client client; @Inject - public TransformUsageTransportAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, - ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - Settings settings, XPackLicenseState licenseState, Client client) { - super(XPackUsageFeatureAction.TRANSFORM.name(), transportService, clusterService, - threadPool, actionFilters, indexNameExpressionResolver); + public TransformUsageTransportAction( + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + Settings settings, + XPackLicenseState licenseState, + Client client + ) { + super( + XPackUsageFeatureAction.TRANSFORM.name(), + transportService, + clusterService, + threadPool, + actionFilters, + indexNameExpressionResolver + ); this.enabled = XPackSettings.TRANSFORM_ENABLED.get(settings); this.licenseState = licenseState; this.client = client; } @Override - protected void masterOperation(Task task, XPackUsageRequest request, ClusterState state, - ActionListener<XPackUsageFeatureResponse> listener) { + protected void masterOperation( + Task task, + XPackUsageRequest request, + ClusterState state, + ActionListener<XPackUsageFeatureResponse> listener + ) { boolean available = licenseState.isTransformAllowed(); if (enabled == false) { var usage = new TransformFeatureSetUsage(available, enabled, Collections.emptyMap(), new TransformIndexerStats()); @@ -75,61 +92,66 @@ protected void masterOperation(Task task, XPackUsageRequest request, ClusterStat } PersistentTasksCustomMetaData taskMetadata = PersistentTasksCustomMetaData.getPersistentTasksCustomMetaData(state); - Collection<PersistentTasksCustomMetaData.PersistentTask<?>> transformTasks = taskMetadata == null ? - Collections.emptyList() : - taskMetadata.findTasks(TransformTaskParams.NAME, (t) -> true); + Collection<PersistentTasksCustomMetaData.PersistentTask<?>> transformTasks = taskMetadata == null + ? 
Collections.emptyList() + : taskMetadata.findTasks(TransformTaskParams.NAME, (t) -> true); final int taskCount = transformTasks.size(); final Map<String, Long> transformsCountByState = new HashMap<>(); - for(PersistentTasksCustomMetaData.PersistentTask<?> transformTask : transformTasks) { - TransformState transformState = (TransformState)transformTask.getState(); - transformsCountByState.merge(transformState.getTaskState().value(), 1L, Long::sum); + for (PersistentTasksCustomMetaData.PersistentTask<?> transformTask : transformTasks) { + TransformState transformState = (TransformState) transformTask.getState(); + TransformTaskState taskState = transformState.getTaskState(); + if (taskState != null) { + transformsCountByState.merge(taskState.value(), 1L, Long::sum); + } } - ActionListener<TransformIndexerStats> totalStatsListener = ActionListener.wrap( - statSummations -> { - var usage = new TransformFeatureSetUsage(available, enabled, transformsCountByState, statSummations); - listener.onResponse(new XPackUsageFeatureResponse(usage)); - }, - listener::onFailure - ); + ActionListener<TransformIndexerStats> totalStatsListener = ActionListener.wrap(statSummations -> { + var usage = new TransformFeatureSetUsage(available, enabled, transformsCountByState, statSummations); + listener.onResponse(new XPackUsageFeatureResponse(usage)); + }, listener::onFailure); - ActionListener<SearchResponse> totalTransformCountListener = ActionListener.wrap( - transformCountSuccess -> { - if (transformCountSuccess.getShardFailures().length > 0) { - logger.error("total transform count search returned shard failures: {}", - Arrays.toString(transformCountSuccess.getShardFailures())); - } - long totalTransforms = transformCountSuccess.getHits().getTotalHits().value; - if (totalTransforms == 0) { - var usage = new TransformFeatureSetUsage(available, enabled, transformsCountByState, - new TransformIndexerStats()); - listener.onResponse(new XPackUsageFeatureResponse(usage)); - return; - } - transformsCountByState.merge(TransformTaskState.STOPPED.value(), totalTransforms - taskCount, Long::sum); + ActionListener<SearchResponse> totalTransformCountListener = ActionListener.wrap(transformCountSuccess -> { + if (transformCountSuccess.getShardFailures().length > 0) { + logger.error( + "total transform count search returned shard failures: {}", + Arrays.toString(transformCountSuccess.getShardFailures()) + ); + } + long totalTransforms = transformCountSuccess.getHits().getTotalHits().value; + if (totalTransforms == 0) { + var usage = new TransformFeatureSetUsage(available, enabled, transformsCountByState, new TransformIndexerStats()); + listener.onResponse(new XPackUsageFeatureResponse(usage)); + return; + } + transformsCountByState.merge(TransformTaskState.STOPPED.value(), totalTransforms - taskCount, Long::sum); + TransformInfoTransportAction.getStatisticSummations(client, totalStatsListener); + }, transformCountFailure -> { + if (transformCountFailure instanceof ResourceNotFoundException) { TransformInfoTransportAction.getStatisticSummations(client, totalStatsListener); - }, - transformCountFailure -> { - if (transformCountFailure instanceof ResourceNotFoundException) { - TransformInfoTransportAction.getStatisticSummations(client, totalStatsListener); - } else { - listener.onFailure(transformCountFailure); - } + } else { + listener.onFailure(transformCountFailure); } - ); + }); - SearchRequest totalTransformCount = client - .prepareSearch(TransformInternalIndexConstants.INDEX_NAME_PATTERN, - TransformInternalIndexConstants.INDEX_NAME_PATTERN_DEPRECATED) + SearchRequest totalTransformCount = client.prepareSearch( 
TransformInternalIndexConstants.INDEX_NAME_PATTERN, + TransformInternalIndexConstants.INDEX_NAME_PATTERN_DEPRECATED + ) .setTrackTotalHits(true) - .setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.boolQuery() - .filter(QueryBuilders.termQuery(TransformField.INDEX_DOC_TYPE.getPreferredName(), TransformConfig.NAME)))) + .setQuery( + QueryBuilders.constantScoreQuery( + QueryBuilders.boolQuery() + .filter(QueryBuilders.termQuery(TransformField.INDEX_DOC_TYPE.getPreferredName(), TransformConfig.NAME)) + ) + ) .request(); - ClientHelper.executeAsyncWithOrigin(client.threadPool().getThreadContext(), + ClientHelper.executeAsyncWithOrigin( + client.threadPool().getThreadContext(), ClientHelper.TRANSFORM_ORIGIN, totalTransformCount, totalTransformCountListener, - client::search); + client::search + ); } } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetTransformStatsAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetTransformStatsAction.java index f77ef92483af2..fb7046ae2d5e2 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetTransformStatsAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetTransformStatsAction.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; @@ -189,7 +190,7 @@ static TransformStats deriveStats(TransformTask task, @Nullable TransformCheckpo && derivedState.equals(TransformStats.State.STOPPED) == false && derivedState.equals(TransformStats.State.FAILED) == false) { derivedState = TransformStats.State.STOPPING; - reason = reason.isEmpty() ? "transform is set to stop at the next checkpoint" : reason; + reason = Strings.isNullOrEmpty(reason) ? 
"transform is set to stop at the next checkpoint" : reason; } return new TransformStats( task.getTransformId(), diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStartTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStartTransformAction.java index ee3e42246650c..be5dd13aee314 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStartTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStartTransformAction.java @@ -265,7 +265,9 @@ protected void masterOperation( ); return; } - transformTaskHolder.set(createTransform(config.getId(), config.getVersion(), config.getFrequency())); + transformTaskHolder.set( + createTransform(config.getId(), config.getVersion(), config.getFrequency(), config.getSource().requiresRemoteCluster()) + ); transformConfigHolder.set(config); if (config.getDestination().getPipeline() != null) { if (ingestService.getPipeline(config.getDestination().getPipeline()) == null) { @@ -311,8 +313,13 @@ protected ClusterBlockException checkBlock(StartTransformAction.Request request, return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); } - private static TransformTaskParams createTransform(String transformId, Version transformVersion, TimeValue frequency) { - return new TransformTaskParams(transformId, transformVersion, frequency); + private static TransformTaskParams createTransform( + String transformId, + Version transformVersion, + TimeValue frequency, + Boolean requiresRemoteCluster + ) { + return new TransformTaskParams(transformId, transformVersion, frequency, requiresRemoteCluster); } @SuppressWarnings("unchecked") diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutor.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutor.java index db752d2d10093..56db82f256553 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutor.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutor.java @@ -11,6 +11,7 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.support.IndicesOptions; @@ -49,8 +50,10 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.TreeMap; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; public class TransformPersistentTasksExecutor extends PersistentTasksExecutor<TransformTaskParams> { @@ -100,9 +103,88 @@ public PersistentTasksCustomMetaData.Assignment getAssignment(TransformTaskParam } DiscoveryNode discoveryNode = selectLeastLoadedNode( clusterState, - (node) -> node.isDataNode() && node.getVersion().onOrAfter(params.getVersion()) + (node) -> node.getVersion().onOrAfter(Version.V_8_0_0) + ? nodeCanRunThisTransform(node, params, null) + : nodeCanRunThisTransformPre77(node, params, null) ); - return discoveryNode == null ? 
NO_NODE_FOUND : new PersistentTasksCustomMetaData.Assignment(discoveryNode.getId(), ""); + + if (discoveryNode == null) { + Map<String, String> explainWhyAssignmentFailed = new TreeMap<>(); + for (DiscoveryNode node : clusterState.getNodes()) { + if (node.getVersion().onOrAfter(Version.V_8_0_0)) { // todo: V_7_7_0, remove from 8.0 + nodeCanRunThisTransform(node, params, explainWhyAssignmentFailed); + } else { + nodeCanRunThisTransformPre77(node, params, explainWhyAssignmentFailed); + } + } + String reason = "Not starting transform [" + + params.getId() + + "], reasons [" + + explainWhyAssignmentFailed.entrySet().stream().map(e -> e.getKey() + ":" + e.getValue()).collect(Collectors.joining("|")) + + "]"; + + logger.debug(reason); + return new PersistentTasksCustomMetaData.Assignment(null, reason); + } + + return new PersistentTasksCustomMetaData.Assignment(discoveryNode.getId(), ""); + } + + // todo: this can be removed for 8.0 after backport + public static boolean nodeCanRunThisTransformPre77(DiscoveryNode node, TransformTaskParams params, Map<String, String> explain) { + if (node.isDataNode() == false) { + if (explain != null) { + explain.put(node.getId(), "not a data node"); + } + return false; + } + + // version of the transform run on a node that has at least the same version + if (node.getVersion().onOrAfter(params.getVersion()) == false) { + if (explain != null) { + explain.put( + node.getId(), + "node has version: " + node.getVersion() + " but transform requires at least " + params.getVersion() + ); + } + return false; + } + + return true; + } + + public static boolean nodeCanRunThisTransform(DiscoveryNode node, TransformTaskParams params, Map<String, String> explain) { + // version of the transform run on a node that has at least the same version + if (node.getVersion().onOrAfter(params.getVersion()) == false) { + if (explain != null) { + explain.put( + node.getId(), + "node has version: " + node.getVersion() + " but transform requires at least " + params.getVersion() + ); + } + return false; + } + + final Map<String, String> nodeAttributes = node.getAttributes(); + + // transform enabled? + if (Boolean.parseBoolean(nodeAttributes.get(Transform.TRANSFORM_ENABLED_NODE_ATTR)) == false) { + if (explain != null) { + explain.put(node.getId(), "not a transform node"); + } + return false; + } + + // does the transform require a remote and remote is enabled? + if (params.requiresRemote() && Boolean.parseBoolean(nodeAttributes.get(Transform.TRANSFORM_REMOTE_ENABLED_NODE_ATTR)) == false) { + if (explain != null) { + explain.put(node.getId(), "transform requires a remote connection but remote is disabled"); + } + return false; + } + + // we found no reason that the transform can not run on this node + return true; } static List<String> verifyIndicesPrimaryShardsAreActive(ClusterState clusterState, IndexNameExpressionResolver resolver) { diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/TransformTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/TransformTests.java new file mode 100644 index 0000000000000..fdfd5a5a1c247 --- /dev/null +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/TransformTests.java @@ -0,0 +1,79 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.transform; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; + +public class TransformTests extends ESTestCase { + + public void testNodeAttributes() { + Settings.Builder builder = Settings.builder(); + boolean transformEnabled = randomBoolean(); + boolean transformPluginEnabled = randomBoolean(); + boolean remoteEnabled = randomBoolean(); + + // randomly use explicit or default setting + if ((transformEnabled && randomBoolean()) == false) { + builder.put("node.transform", transformEnabled); + } + + // randomly use explicit or default setting + if ((remoteEnabled && randomBoolean()) == false) { + builder.put("cluster.remote.connect", remoteEnabled); + } + + if (transformPluginEnabled == false) { + builder.put("xpack.transform.enabled", transformPluginEnabled); + } + + builder.put("node.attr.some_other_attrib", "value"); + Transform transform = createTransform(builder.build()); + assertNotNull(transform.additionalSettings()); + assertEquals( + transformPluginEnabled && transformEnabled, + Boolean.parseBoolean(transform.additionalSettings().get("node.attr.transform.node")) + ); + assertEquals( + transformPluginEnabled && remoteEnabled, + Boolean.parseBoolean(transform.additionalSettings().get("node.attr.transform.remote_connect")) + ); + } + + public void testNodeAttributesDirectlyGiven() { + Settings.Builder builder = Settings.builder(); + + if (randomBoolean()) { + builder.put("node.attr.transform.node", randomBoolean()); + } else { + builder.put("node.attr.transform.remote_connect", randomBoolean()); + } + + Transform transform = createTransform(builder.build()); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> transform.additionalSettings()); + assertThat( + e.getMessage(), + equalTo("Directly setting transform node attributes is not permitted, please use the documented node settings instead") + ); + } + + private Transform createTransform(Settings settings) { + XPackLicenseState licenseState = mock(XPackLicenseState.class); + + return new Transform(settings) { + @Override + protected XPackLicenseState getLicenseState() { + return licenseState; + } + }; + } + +} diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransformNodesTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransformNodesTests.java index 2ce3d8f882a27..61c04bd0088cc 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransformNodesTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransformNodesTests.java @@ -30,38 +30,43 @@ public void testTransformNodes() { String transformIdBar = "df-id-bar"; PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); - tasksBuilder.addTask(transformIdFoo, - TransformField.TASK_NAME, new TransformTaskParams(transformIdFoo, Version.CURRENT, null), - new PersistentTasksCustomMetaData.Assignment("node-1", "test assignment")); - tasksBuilder.addTask(transformIdBar, - TransformField.TASK_NAME, new TransformTaskParams(transformIdBar, Version.CURRENT, null), - new PersistentTasksCustomMetaData.Assignment("node-2", "test assignment")); + tasksBuilder.addTask( + transformIdFoo, + TransformField.TASK_NAME, + new 
TransformTaskParams(transformIdFoo, Version.CURRENT, null, false), + new PersistentTasksCustomMetaData.Assignment("node-1", "test assignment") + ); + tasksBuilder.addTask( + transformIdBar, + TransformField.TASK_NAME, + new TransformTaskParams(transformIdBar, Version.CURRENT, null, false), + new PersistentTasksCustomMetaData.Assignment("node-2", "test assignment") + ); tasksBuilder.addTask("test-task1", "testTasks", new PersistentTaskParams() { - @Override - public String getWriteableName() { - return "testTasks"; - } + @Override + public String getWriteableName() { + return "testTasks"; + } - @Override - public Version getMinimalSupportedVersion() { - return null; - } + @Override + public Version getMinimalSupportedVersion() { + return null; + } - @Override - public void writeTo(StreamOutput out) { + @Override + public void writeTo(StreamOutput out) { - } + } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) { - return null; - } - }, - new PersistentTasksCustomMetaData.Assignment("node-3", "test assignment")); + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) { + return null; + } + }, new PersistentTasksCustomMetaData.Assignment("node-3", "test assignment")); ClusterState cs = ClusterState.builder(new ClusterName("_name")) - .metaData(MetaData.builder().putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())) - .build(); + .metaData(MetaData.builder().putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())) + .build(); String[] nodes = TransformNodes.transformTaskNodes(Arrays.asList(transformIdFoo, transformIdBar), cs); assertEquals(2, nodes.length); diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransportGetTransformStatsActionTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransportGetTransformStatsActionTests.java index 7dbe2b4f3a2a1..28b5d11633afe 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransportGetTransformStatsActionTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransportGetTransformStatsActionTests.java @@ -28,101 +28,159 @@ public class TransportGetTransformStatsActionTests extends ESTestCase { public void testDeriveStatsStopped() { String transformId = "transform-with-stats"; - String reason = ""; + String reason = null; TransformIndexerStats stats = TransformIndexerStatsTests.randomStats(); - TransformState stoppedState = - new TransformState(TransformTaskState.STOPPED, IndexerState.STOPPED, null, 0, reason, null, null, true); + TransformState stoppedState = new TransformState( + TransformTaskState.STOPPED, + IndexerState.STOPPED, + null, + 0, + reason, + null, + null, + true + ); withIdStateAndStats(transformId, stoppedState, stats); TransformCheckpointingInfo info = new TransformCheckpointingInfo( new TransformCheckpointStats(1, null, null, 1, 1), new TransformCheckpointStats(2, null, null, 2, 5), 2, - Instant.now()); - - assertThat(TransportGetTransformStatsAction.deriveStats(task, null), - equalTo(new TransformStats(transformId, TransformStats.State.STOPPED, "", null, stats, TransformCheckpointingInfo.EMPTY))); - assertThat(TransportGetTransformStatsAction.deriveStats(task, info), - equalTo(new TransformStats(transformId, TransformStats.State.STOPPED, "", null, stats, info))); - + Instant.now() + ); + + assertThat( + TransportGetTransformStatsAction.deriveStats(task, null), + equalTo(new 
TransformStats(transformId, TransformStats.State.STOPPED, reason, null, stats, TransformCheckpointingInfo.EMPTY)) + ); + assertThat( + TransportGetTransformStatsAction.deriveStats(task, info), + equalTo(new TransformStats(transformId, TransformStats.State.STOPPED, reason, null, stats, info)) + ); reason = "foo"; stoppedState = new TransformState(TransformTaskState.STOPPED, IndexerState.STOPPED, null, 0, reason, null, null, true); withIdStateAndStats(transformId, stoppedState, stats); - assertThat(TransportGetTransformStatsAction.deriveStats(task, null), - equalTo(new TransformStats(transformId, TransformStats.State.STOPPED, reason, null, stats, TransformCheckpointingInfo.EMPTY))); - assertThat(TransportGetTransformStatsAction.deriveStats(task, info), - equalTo(new TransformStats(transformId, TransformStats.State.STOPPED, reason, null, stats, info))); + assertThat( + TransportGetTransformStatsAction.deriveStats(task, null), + equalTo(new TransformStats(transformId, TransformStats.State.STOPPED, reason, null, stats, TransformCheckpointingInfo.EMPTY)) + ); + assertThat( + TransportGetTransformStatsAction.deriveStats(task, info), + equalTo(new TransformStats(transformId, TransformStats.State.STOPPED, reason, null, stats, info)) + ); } public void testDeriveStatsFailed() { String transformId = "transform-with-stats"; - String reason = ""; + String reason = null; TransformIndexerStats stats = TransformIndexerStatsTests.randomStats(); - TransformState failedState = - new TransformState(TransformTaskState.FAILED, IndexerState.STOPPED, null, 0, reason, null, null, true); + TransformState failedState = new TransformState(TransformTaskState.FAILED, IndexerState.STOPPED, null, 0, reason, null, null, true); withIdStateAndStats(transformId, failedState, stats); TransformCheckpointingInfo info = new TransformCheckpointingInfo( new TransformCheckpointStats(1, null, null, 1, 1), new TransformCheckpointStats(2, null, null, 2, 5), 2, - Instant.now()); - - assertThat(TransportGetTransformStatsAction.deriveStats(task, null), - equalTo(new TransformStats(transformId, TransformStats.State.FAILED, "", null, stats, TransformCheckpointingInfo.EMPTY))); - assertThat(TransportGetTransformStatsAction.deriveStats(task, info), - equalTo(new TransformStats(transformId, TransformStats.State.FAILED, "", null, stats, info))); - + Instant.now() + ); + + assertThat( + TransportGetTransformStatsAction.deriveStats(task, null), + equalTo(new TransformStats(transformId, TransformStats.State.FAILED, reason, null, stats, TransformCheckpointingInfo.EMPTY)) + ); + assertThat( + TransportGetTransformStatsAction.deriveStats(task, info), + equalTo(new TransformStats(transformId, TransformStats.State.FAILED, reason, null, stats, info)) + ); reason = "the task is failed"; failedState = new TransformState(TransformTaskState.FAILED, IndexerState.STOPPED, null, 0, reason, null, null, true); withIdStateAndStats(transformId, failedState, stats); - assertThat(TransportGetTransformStatsAction.deriveStats(task, null), - equalTo(new TransformStats(transformId, TransformStats.State.FAILED, reason, null, stats, TransformCheckpointingInfo.EMPTY))); - assertThat(TransportGetTransformStatsAction.deriveStats(task, info), - equalTo(new TransformStats(transformId, TransformStats.State.FAILED, reason, null, stats, info))); + assertThat( + TransportGetTransformStatsAction.deriveStats(task, null), + equalTo(new TransformStats(transformId, TransformStats.State.FAILED, reason, null, stats, TransformCheckpointingInfo.EMPTY)) + ); + assertThat( + 
TransportGetTransformStatsAction.deriveStats(task, info), + equalTo(new TransformStats(transformId, TransformStats.State.FAILED, reason, null, stats, info)) + ); } - public void testDeriveStats() { String transformId = "transform-with-stats"; - String reason = ""; + String reason = null; TransformIndexerStats stats = TransformIndexerStatsTests.randomStats(); - TransformState runningState = - new TransformState(TransformTaskState.STARTED, IndexerState.INDEXING, null, 0, reason, null, null, true); + TransformState runningState = new TransformState( + TransformTaskState.STARTED, + IndexerState.INDEXING, + null, + 0, + reason, + null, + null, + true + ); withIdStateAndStats(transformId, runningState, stats); TransformCheckpointingInfo info = new TransformCheckpointingInfo( new TransformCheckpointStats(1, null, null, 1, 1), new TransformCheckpointStats(2, null, null, 2, 5), 2, - Instant.now()); - - assertThat(TransportGetTransformStatsAction.deriveStats(task, null), - equalTo(new TransformStats(transformId, TransformStats.State.STOPPING, - "transform is set to stop at the next checkpoint", null, stats, TransformCheckpointingInfo.EMPTY))); - assertThat(TransportGetTransformStatsAction.deriveStats(task, info), - equalTo(new TransformStats(transformId, TransformStats.State.STOPPING, - "transform is set to stop at the next checkpoint", null, stats, info))); - + Instant.now() + ); + + assertThat( + TransportGetTransformStatsAction.deriveStats(task, null), + equalTo( + new TransformStats( + transformId, + TransformStats.State.STOPPING, + "transform is set to stop at the next checkpoint", + null, + stats, + TransformCheckpointingInfo.EMPTY + ) + ) + ); + assertThat( + TransportGetTransformStatsAction.deriveStats(task, info), + equalTo( + new TransformStats( + transformId, + TransformStats.State.STOPPING, + "transform is set to stop at the next checkpoint", + null, + stats, + info + ) + ) + ); reason = "foo"; runningState = new TransformState(TransformTaskState.STARTED, IndexerState.INDEXING, null, 0, reason, null, null, true); withIdStateAndStats(transformId, runningState, stats); - assertThat(TransportGetTransformStatsAction.deriveStats(task, null), - equalTo(new TransformStats(transformId, TransformStats.State.STOPPING, reason, null, stats, TransformCheckpointingInfo.EMPTY))); - assertThat(TransportGetTransformStatsAction.deriveStats(task, info), - equalTo(new TransformStats(transformId, TransformStats.State.STOPPING, reason, null, stats, info))); + assertThat( + TransportGetTransformStatsAction.deriveStats(task, null), + equalTo(new TransformStats(transformId, TransformStats.State.STOPPING, reason, null, stats, TransformCheckpointingInfo.EMPTY)) + ); + assertThat( + TransportGetTransformStatsAction.deriveStats(task, info), + equalTo(new TransformStats(transformId, TransformStats.State.STOPPING, reason, null, stats, info)) + ); // Stop at next checkpoint is false. 
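// with stopAtNextCheckpoint set to false the indexer keeps running, so the derived state is INDEXING and the reason is passed through unchanged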
runningState = new TransformState(TransformTaskState.STARTED, IndexerState.INDEXING, null, 0, reason, null, null, false); withIdStateAndStats(transformId, runningState, stats); - assertThat(TransportGetTransformStatsAction.deriveStats(task, null), - equalTo(new TransformStats(transformId, TransformStats.State.INDEXING, reason, null, stats, TransformCheckpointingInfo.EMPTY))); - assertThat(TransportGetTransformStatsAction.deriveStats(task, info), - equalTo(new TransformStats(transformId, TransformStats.State.INDEXING, reason, null, stats, info))); + assertThat( + TransportGetTransformStatsAction.deriveStats(task, null), + equalTo(new TransformStats(transformId, TransformStats.State.INDEXING, reason, null, stats, TransformCheckpointingInfo.EMPTY)) + ); + assertThat( + TransportGetTransformStatsAction.deriveStats(task, info), + equalTo(new TransformStats(transformId, TransformStats.State.INDEXING, reason, null, stats, info)) + ); } private void withIdStateAndStats(String transformId, TransformState state, TransformIndexerStats stats) { diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransportStopTransformActionTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransportStopTransformActionTests.java index 0ca86c3657f62..311f4a9b6f0fb 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransportStopTransformActionTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransportStopTransformActionTests.java @@ -17,8 +17,8 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.transform.TransformMessages; -import org.elasticsearch.xpack.core.transform.transforms.TransformTaskParams; import org.elasticsearch.xpack.core.transform.transforms.TransformState; +import org.elasticsearch.xpack.core.transform.transforms.TransformTaskParams; import org.elasticsearch.xpack.core.transform.transforms.TransformTaskState; import java.util.ArrayList; @@ -48,35 +48,35 @@ public void testTaskStateValidationWithNoTasks() { public void testTaskStateValidationWithTransformTasks() { // Test with the task state being null PersistentTasksCustomMetaData.Builder pTasksBuilder = PersistentTasksCustomMetaData.builder() - .addTask("non-failed-task", + .addTask( + "non-failed-task", TransformTaskParams.NAME, - new TransformTaskParams("transform-task-1", Version.CURRENT, null), - new PersistentTasksCustomMetaData.Assignment("current-data-node-with-1-tasks", "")); + new TransformTaskParams("transform-task-1", Version.CURRENT, null, false), + new PersistentTasksCustomMetaData.Assignment("current-data-node-with-1-tasks", "") + ); ClusterState.Builder csBuilder = ClusterState.builder(new ClusterName("_name")).metaData(buildMetadata(pTasksBuilder.build())); TransportStopTransformAction.validateTaskState(csBuilder.build(), Collections.singletonList("non-failed-task"), false); // test again with a non failed task but this time it has internal state - pTasksBuilder.updateTaskState("non-failed-task", new TransformState(TransformTaskState.STOPPED, - IndexerState.STOPPED, - null, - 0L, - null, - null)); + pTasksBuilder.updateTaskState( + "non-failed-task", + new TransformState(TransformTaskState.STOPPED, IndexerState.STOPPED, null, 0L, null, null) + ); csBuilder = ClusterState.builder(new ClusterName("_name")).metaData(buildMetadata(pTasksBuilder.build())); 
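// a task that is STOPPED but not FAILED must still pass validation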
TransportStopTransformAction.validateTaskState(csBuilder.build(), Collections.singletonList("non-failed-task"), false); - pTasksBuilder.addTask("failed-task", + pTasksBuilder.addTask( + "failed-task", TransformTaskParams.NAME, - new TransformTaskParams("transform-task-1", Version.CURRENT, null), - new PersistentTasksCustomMetaData.Assignment("current-data-node-with-1-tasks", "")) - .updateTaskState("failed-task", new TransformState(TransformTaskState.FAILED, - IndexerState.STOPPED, - null, - 0L, - "task has failed", - null)); + new TransformTaskParams("transform-task-1", Version.CURRENT, null, false), + new PersistentTasksCustomMetaData.Assignment("current-data-node-with-1-tasks", "") + ) + .updateTaskState( + "failed-task", + new TransformState(TransformTaskState.FAILED, IndexerState.STOPPED, null, 0L, "task has failed", null) + ); csBuilder = ClusterState.builder(new ClusterName("_name")).metaData(buildMetadata(pTasksBuilder.build())); TransportStopTransformAction.validateTaskState(csBuilder.build(), Arrays.asList("non-failed-task", "failed-task"), true); @@ -84,51 +84,59 @@ public void testTaskStateValidationWithTransformTasks() { TransportStopTransformAction.validateTaskState(csBuilder.build(), Collections.singletonList("non-failed-task"), false); ClusterState.Builder csBuilderFinal = ClusterState.builder(new ClusterName("_name")).metaData(buildMetadata(pTasksBuilder.build())); - ElasticsearchStatusException ex = expectThrows(ElasticsearchStatusException.class, - () -> TransportStopTransformAction.validateTaskState(csBuilderFinal.build(), - Collections.singletonList("failed-task"), - false)); + ElasticsearchStatusException ex = expectThrows( + ElasticsearchStatusException.class, + () -> TransportStopTransformAction.validateTaskState(csBuilderFinal.build(), Collections.singletonList("failed-task"), false) + ); assertThat(ex.status(), equalTo(CONFLICT)); - assertThat(ex.getMessage(), - equalTo(TransformMessages.getMessage(TransformMessages.CANNOT_STOP_FAILED_TRANSFORM, - "failed-task", - "task has failed"))); + assertThat( + ex.getMessage(), + equalTo(TransformMessages.getMessage(TransformMessages.CANNOT_STOP_FAILED_TRANSFORM, "failed-task", "task has failed")) + ); } public void testFirstNotOKStatus() { List<ElasticsearchException> nodeFailures = new ArrayList<>(); List<TaskOperationFailure> taskOperationFailures = new ArrayList<>(); - nodeFailures.add(new ElasticsearchException("nodefailure", - new ElasticsearchStatusException("failure", RestStatus.UNPROCESSABLE_ENTITY))); - taskOperationFailures.add(new TaskOperationFailure("node", - 1, - new ElasticsearchStatusException("failure", RestStatus.BAD_REQUEST))); - - assertThat(TransportStopTransformAction.firstNotOKStatus(Collections.emptyList(), Collections.emptyList()), - equalTo(RestStatus.INTERNAL_SERVER_ERROR)); - - assertThat(TransportStopTransformAction.firstNotOKStatus(taskOperationFailures, Collections.emptyList()), - equalTo(RestStatus.BAD_REQUEST)); - assertThat(TransportStopTransformAction.firstNotOKStatus(taskOperationFailures, nodeFailures), - equalTo(RestStatus.BAD_REQUEST)); - assertThat(TransportStopTransformAction.firstNotOKStatus(taskOperationFailures, - Collections.singletonList(new ElasticsearchException(new ElasticsearchStatusException("not failure", RestStatus.OK)))), - equalTo(RestStatus.BAD_REQUEST)); - - assertThat(TransportStopTransformAction.firstNotOKStatus( - Collections.singletonList(new TaskOperationFailure( - "node", - 1, - new ElasticsearchStatusException("not failure", RestStatus.OK))), - nodeFailures), - equalTo(RestStatus.INTERNAL_SERVER_ERROR)); 
- - assertThat(TransportStopTransformAction.firstNotOKStatus( - Collections.emptyList(), - nodeFailures), - equalTo(RestStatus.INTERNAL_SERVER_ERROR)); + nodeFailures.add( + new ElasticsearchException("nodefailure", new ElasticsearchStatusException("failure", RestStatus.UNPROCESSABLE_ENTITY)) + ); + taskOperationFailures.add(new TaskOperationFailure("node", 1, new ElasticsearchStatusException("failure", RestStatus.BAD_REQUEST))); + + assertThat( + TransportStopTransformAction.firstNotOKStatus(Collections.emptyList(), Collections.emptyList()), + equalTo(RestStatus.INTERNAL_SERVER_ERROR) + ); + + assertThat( + TransportStopTransformAction.firstNotOKStatus(taskOperationFailures, Collections.emptyList()), + equalTo(RestStatus.BAD_REQUEST) + ); + assertThat(TransportStopTransformAction.firstNotOKStatus(taskOperationFailures, nodeFailures), equalTo(RestStatus.BAD_REQUEST)); + assertThat( + TransportStopTransformAction.firstNotOKStatus( + taskOperationFailures, + Collections.singletonList(new ElasticsearchException(new ElasticsearchStatusException("not failure", RestStatus.OK))) + ), + equalTo(RestStatus.BAD_REQUEST) + ); + + assertThat( + TransportStopTransformAction.firstNotOKStatus( + Collections.singletonList( + new TaskOperationFailure("node", 1, new ElasticsearchStatusException("not failure", RestStatus.OK)) + ), + nodeFailures + ), + equalTo(RestStatus.INTERNAL_SERVER_ERROR) + ); + + assertThat( + TransportStopTransformAction.firstNotOKStatus(Collections.emptyList(), nodeFailures), + equalTo(RestStatus.INTERNAL_SERVER_ERROR) + ); } public void testBuildException() { @@ -136,13 +144,16 @@ public void testBuildException() { List<ElasticsearchException> nodeFailures = new ArrayList<>(); List<TaskOperationFailure> taskOperationFailures = new ArrayList<>(); nodeFailures.add(new ElasticsearchException("node failure")); - taskOperationFailures.add(new TaskOperationFailure("node", - 1, - new ElasticsearchStatusException("task failure", RestStatus.BAD_REQUEST))); + taskOperationFailures.add( + new TaskOperationFailure("node", 1, new ElasticsearchStatusException("task failure", RestStatus.BAD_REQUEST)) + ); RestStatus status = CONFLICT; - ElasticsearchStatusException statusException = - TransportStopTransformAction.buildException(taskOperationFailures, nodeFailures, status); + ElasticsearchStatusException statusException = TransportStopTransformAction.buildException( + taskOperationFailures, + nodeFailures, + status + ); assertThat(statusException.status(), equalTo(status)); assertThat(statusException.getMessage(), equalTo(taskOperationFailures.get(0).getCause().getMessage())); diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutorTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutorTests.java index f34e1a2b78535..117bfc4d661da 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutorTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutorTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData.Assignment; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; @@ -41,8 +42,12 @@ import 
org.elasticsearch.xpack.transform.persistence.TransformInternalIndexTests; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; import static org.hamcrest.Matchers.equalTo; @@ -52,114 +57,115 @@ public class TransformPersistentTasksExecutorTests extends ESTestCase { public void testNodeVersionAssignment() { - MetaData.Builder metaData = MetaData.builder(); - RoutingTable.Builder routingTable = RoutingTable.builder(); - addIndices(metaData, routingTable); - PersistentTasksCustomMetaData.Builder pTasksBuilder = PersistentTasksCustomMetaData.builder() - .addTask( - "transform-task-1", - TransformTaskParams.NAME, - new TransformTaskParams("transform-task-1", Version.CURRENT, null), - new PersistentTasksCustomMetaData.Assignment("current-data-node-with-1-tasks", "") - ) - .addTask( - "transform-task-2", - TransformTaskParams.NAME, - new TransformTaskParams("transform-task-2", Version.CURRENT, null), - new PersistentTasksCustomMetaData.Assignment("current-data-node-with-2-tasks", "") - ) - .addTask( - "transform-task-3", - TransformTaskParams.NAME, - new TransformTaskParams("transform-task-3", Version.CURRENT, null), - new PersistentTasksCustomMetaData.Assignment("current-data-node-with-2-tasks", "") - ); + DiscoveryNodes.Builder nodes = buildNodes(false, true, true, true, true); + ClusterState cs = buildClusterState(nodes); + TransformPersistentTasksExecutor executor = buildTaskExecutor(); - PersistentTasksCustomMetaData pTasks = pTasksBuilder.build(); + assertThat( + executor.getAssignment(new TransformTaskParams("new-task-id", Version.CURRENT, null, true), cs).getExecutorNode(), + equalTo("current-data-node-with-1-tasks") + ); + assertThat( + executor.getAssignment(new TransformTaskParams("new-task-id", Version.CURRENT, null, false), cs).getExecutorNode(), + equalTo("current-data-node-with-0-tasks-transform-remote-disabled") + ); + assertThat( + executor.getAssignment(new TransformTaskParams("new-old-task-id", Version.V_7_5_0, null, true), cs).getExecutorNode(), + equalTo("past-data-node-1") + ); + } - metaData.putCustom(PersistentTasksCustomMetaData.TYPE, pTasks); + public void testNodeAssignmentProblems() { + // no data nodes + DiscoveryNodes.Builder nodes = buildNodes(false, false, false, false, true); + ClusterState cs = buildClusterState(nodes); + TransformPersistentTasksExecutor executor = buildTaskExecutor(); - DiscoveryNodes.Builder nodes = DiscoveryNodes.builder() - .add( - new DiscoveryNode( - "past-data-node-1", - buildNewFakeTransportAddress(), - Collections.emptyMap(), - Set.of(DiscoveryNodeRole.DATA_ROLE, DiscoveryNodeRole.MASTER_ROLE), - Version.V_7_2_0 - ) - ) - .add( - new DiscoveryNode( - "current-data-node-with-2-tasks", - buildNewFakeTransportAddress(), - Collections.emptyMap(), - Set.of(DiscoveryNodeRole.DATA_ROLE, DiscoveryNodeRole.MASTER_ROLE), - Version.CURRENT - ) - ) - .add( - new DiscoveryNode( - "non-data-node-1", - buildNewFakeTransportAddress(), - Collections.emptyMap(), - Set.of(DiscoveryNodeRole.MASTER_ROLE), - Version.CURRENT - ) - ) - .add( - new DiscoveryNode( - "current-data-node-with-1-tasks", - buildNewFakeTransportAddress(), - Collections.emptyMap(), - Set.of(DiscoveryNodeRole.DATA_ROLE, DiscoveryNodeRole.MASTER_ROLE), - Version.CURRENT - ) - ); + Assignment assignment = executor.getAssignment(new TransformTaskParams("new-task-id", Version.CURRENT, null, false), cs); + 
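+ // the only node in this cluster state has the transform.node attribute set to false, so no executor node can be assigned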
assertNull(assignment.getExecutorNode()); + assertThat( + assignment.getExplanation(), + equalTo("Not starting transform [new-task-id], reasons [current-data-node-with-transform-disabled:not a transform node]") + ); - ClusterState.Builder csBuilder = ClusterState.builder(new ClusterName("_name")).nodes(nodes); - csBuilder.routingTable(routingTable.build()); - csBuilder.metaData(metaData); + // dedicated transform node + nodes = buildNodes(true, false, false, false, true); + cs = buildClusterState(nodes); + executor = buildTaskExecutor(); - ClusterState cs = csBuilder.build(); - Client client = mock(Client.class); - TransformAuditor mockAuditor = mock(TransformAuditor.class); - IndexBasedTransformConfigManager transformsConfigManager = new IndexBasedTransformConfigManager(client, xContentRegistry()); - TransformCheckpointService transformCheckpointService = new TransformCheckpointService( - client, - Settings.EMPTY, - new ClusterService(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null), - transformsConfigManager, - mockAuditor - ); - TransformServices transformServices = new TransformServices( - transformsConfigManager, - transformCheckpointService, - mockAuditor, - mock(SchedulerEngine.class) - ); + assignment = executor.getAssignment(new TransformTaskParams("new-task-id", Version.CURRENT, null, false), cs); + assertNotNull(assignment.getExecutorNode()); + assertThat(assignment.getExecutorNode(), equalTo("dedicated-transform-node")); - ClusterSettings cSettings = new ClusterSettings(Settings.EMPTY, Collections.singleton(Transform.NUM_FAILURE_RETRIES_SETTING)); - ClusterService clusterService = mock(ClusterService.class); - when(clusterService.getClusterSettings()).thenReturn(cSettings); - when(clusterService.state()).thenReturn(TransformInternalIndexTests.STATE_WITH_LATEST_VERSIONED_INDEX_TEMPLATE); - TransformPersistentTasksExecutor executor = new TransformPersistentTasksExecutor( - client, - transformServices, - mock(ThreadPool.class), - clusterService, - Settings.EMPTY, - new IndexNameExpressionResolver() + // only an old node + nodes = buildNodes(false, true, false, false, true); + cs = buildClusterState(nodes); + executor = buildTaskExecutor(); + + assignment = executor.getAssignment(new TransformTaskParams("new-task-id", Version.V_7_7_0, null, false), cs); + assertNull(assignment.getExecutorNode()); + assertThat( + assignment.getExplanation(), + equalTo( + "Not starting transform [new-task-id], reasons [" + + "current-data-node-with-transform-disabled:not a transform node" + + "|" + + "past-data-node-1:node has version: 7.5.0 but transform requires at least 7.7.0" + + "]" + ) ); + assignment = executor.getAssignment(new TransformTaskParams("new-task-id", Version.V_7_5_0, null, false), cs); + assertNotNull(assignment.getExecutorNode()); + assertThat(assignment.getExecutorNode(), equalTo("past-data-node-1")); + + // no remote + nodes = buildNodes(false, false, false, true, false); + cs = buildClusterState(nodes); + executor = buildTaskExecutor(); + + assignment = executor.getAssignment(new TransformTaskParams("new-task-id", Version.V_7_5_0, null, true), cs); + assertNull(assignment.getExecutorNode()); assertThat( - executor.getAssignment(new TransformTaskParams("new-task-id", Version.CURRENT, null), cs).getExecutorNode(), - equalTo("current-data-node-with-1-tasks") + assignment.getExplanation(), + equalTo( + "Not starting transform [new-task-id], reasons [" + + "current-data-node-with-0-tasks-transform-remote-disabled:" + + "transform 
requires a remote connection but remote is disabled" + + "]" + ) ); + + assignment = executor.getAssignment(new TransformTaskParams("new-task-id", Version.CURRENT, null, false), cs); + assertNotNull(assignment.getExecutorNode()); + assertThat(assignment.getExecutorNode(), equalTo("current-data-node-with-0-tasks-transform-remote-disabled")); + + // no remote and disabled + nodes = buildNodes(false, false, false, true, true); + cs = buildClusterState(nodes); + executor = buildTaskExecutor(); + + assignment = executor.getAssignment(new TransformTaskParams("new-task-id", Version.V_7_5_0, null, true), cs); + assertNull(assignment.getExecutorNode()); assertThat( - executor.getAssignment(new TransformTaskParams("new-old-task-id", Version.V_7_2_0, null), cs).getExecutorNode(), - equalTo("past-data-node-1") + assignment.getExplanation(), + equalTo( + "Not starting transform [new-task-id], reasons [" + + "current-data-node-with-0-tasks-transform-remote-disabled:" + + "transform requires a remote connection but remote is disabled" + + "|" + + "current-data-node-with-transform-disabled:not a transform node" + + "]" + ) ); + // old node, we do not know if remote is enabled + nodes = buildNodes(false, true, false, true, false); + cs = buildClusterState(nodes); + executor = buildTaskExecutor(); + + assignment = executor.getAssignment(new TransformTaskParams("new-task-id", Version.V_7_5_0, null, true), cs); + assertNotNull(assignment.getExecutorNode()); + assertThat(assignment.getExecutorNode(), equalTo("past-data-node-1")); } public void testVerifyIndicesPrimaryShardsAreActive() { @@ -172,8 +178,7 @@ public void testVerifyIndicesPrimaryShardsAreActive() { csBuilder.metaData(metaData); ClusterState cs = csBuilder.build(); - assertEquals(0, - TransformPersistentTasksExecutor.verifyIndicesPrimaryShardsAreActive(cs, new IndexNameExpressionResolver()).size()); + assertEquals(0, TransformPersistentTasksExecutor.verifyIndicesPrimaryShardsAreActive(cs, new IndexNameExpressionResolver()).size()); metaData = new MetaData.Builder(cs.metaData()); routingTable = new RoutingTable.Builder(cs.routingTable()); @@ -197,8 +202,10 @@ public void testVerifyIndicesPrimaryShardsAreActive() { csBuilder.routingTable(routingTable.build()); csBuilder.metaData(metaData); - List<String> result = - TransformPersistentTasksExecutor.verifyIndicesPrimaryShardsAreActive(csBuilder.build(), new IndexNameExpressionResolver()); + List<String> result = TransformPersistentTasksExecutor.verifyIndicesPrimaryShardsAreActive( + csBuilder.build(), + new IndexNameExpressionResolver() + ); assertEquals(1, result.size()); assertEquals(indexToRemove, result.get(0)); } @@ -232,4 +239,163 @@ private void addIndices(MetaData.Builder metaData, RoutingTable.Builder routingT } } + private DiscoveryNodes.Builder buildNodes( + boolean dedicatedTransformNode, + boolean pastDataNode, + boolean transformRemoteNodes, + boolean transformLocalOnlyNodes, + boolean currentDataNode + ) { + + Map<String, String> transformNodeAttributes = new HashMap<>(); + transformNodeAttributes.put(Transform.TRANSFORM_ENABLED_NODE_ATTR, "true"); + transformNodeAttributes.put(Transform.TRANSFORM_REMOTE_ENABLED_NODE_ATTR, "true"); + Map<String, String> transformNodeAttributesDisabled = new HashMap<>(); + transformNodeAttributesDisabled.put(Transform.TRANSFORM_ENABLED_NODE_ATTR, "false"); + transformNodeAttributesDisabled.put(Transform.TRANSFORM_REMOTE_ENABLED_NODE_ATTR, "true"); + Map<String, String> transformNodeAttributesNoRemote = new HashMap<>(); + transformNodeAttributesNoRemote.put(Transform.TRANSFORM_ENABLED_NODE_ATTR, "true"); +
transformNodeAttributesNoRemote.put(Transform.TRANSFORM_REMOTE_ENABLED_NODE_ATTR, "false"); + + DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(); + + if (dedicatedTransformNode) { + nodes.add( + new DiscoveryNode( + "dedicated-transform-node", + buildNewFakeTransportAddress(), + transformNodeAttributes, + Collections.singleton(DiscoveryNodeRole.MASTER_ROLE), + Version.CURRENT + ) + ); + } + + if (pastDataNode) { + nodes.add( + new DiscoveryNode( + "past-data-node-1", + buildNewFakeTransportAddress(), + Collections.emptyMap(), + new HashSet<>(Arrays.asList(DiscoveryNodeRole.DATA_ROLE, DiscoveryNodeRole.MASTER_ROLE)), + Version.V_7_5_0 + ) + ); + } + + if (transformRemoteNodes) { + nodes.add( + new DiscoveryNode( + "current-data-node-with-2-tasks", + buildNewFakeTransportAddress(), + transformNodeAttributes, + new HashSet<>(Arrays.asList(DiscoveryNodeRole.DATA_ROLE)), + Version.CURRENT + ) + ) + .add( + new DiscoveryNode( + "current-data-node-with-1-tasks", + buildNewFakeTransportAddress(), + transformNodeAttributes, + new HashSet<>(Arrays.asList(DiscoveryNodeRole.MASTER_ROLE)), + Version.CURRENT + ) + ); + } + + if (transformLocalOnlyNodes) { + nodes.add( + new DiscoveryNode( + "current-data-node-with-0-tasks-transform-remote-disabled", + buildNewFakeTransportAddress(), + transformNodeAttributesNoRemote, + new HashSet<>(Arrays.asList(DiscoveryNodeRole.DATA_ROLE, DiscoveryNodeRole.MASTER_ROLE)), + Version.CURRENT + ) + ); + } + + if (currentDataNode) { + nodes.add( + new DiscoveryNode( + "current-data-node-with-transform-disabled", + buildNewFakeTransportAddress(), + transformNodeAttributesDisabled, + Set.of(DiscoveryNodeRole.DATA_ROLE, DiscoveryNodeRole.MASTER_ROLE), + Version.CURRENT + ) + ); + } + + return nodes; + } + + private ClusterState buildClusterState(DiscoveryNodes.Builder nodes) { + MetaData.Builder metaData = MetaData.builder(); + RoutingTable.Builder routingTable = RoutingTable.builder(); + addIndices(metaData, routingTable); + PersistentTasksCustomMetaData.Builder pTasksBuilder = PersistentTasksCustomMetaData.builder() + .addTask( + "transform-task-1", + TransformTaskParams.NAME, + new TransformTaskParams("transform-task-1", Version.CURRENT, null, false), + new PersistentTasksCustomMetaData.Assignment("current-data-node-with-1-tasks", "") + ) + .addTask( + "transform-task-2", + TransformTaskParams.NAME, + new TransformTaskParams("transform-task-2", Version.CURRENT, null, false), + new PersistentTasksCustomMetaData.Assignment("current-data-node-with-2-tasks", "") + ) + .addTask( + "transform-task-3", + TransformTaskParams.NAME, + new TransformTaskParams("transform-task-3", Version.CURRENT, null, false), + new PersistentTasksCustomMetaData.Assignment("current-data-node-with-2-tasks", "") + ); + + PersistentTasksCustomMetaData pTasks = pTasksBuilder.build(); + metaData.putCustom(PersistentTasksCustomMetaData.TYPE, pTasks); + + ClusterState.Builder csBuilder = ClusterState.builder(new ClusterName("_name")).nodes(nodes); + csBuilder.routingTable(routingTable.build()); + csBuilder.metaData(metaData); + + return csBuilder.build(); + + } + + private TransformPersistentTasksExecutor buildTaskExecutor() { + Client client = mock(Client.class); + TransformAuditor mockAuditor = mock(TransformAuditor.class); + IndexBasedTransformConfigManager transformsConfigManager = new IndexBasedTransformConfigManager(client, xContentRegistry()); + TransformCheckpointService transformCheckpointService = new TransformCheckpointService( + client, + Settings.EMPTY, + new
ClusterService(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null), + transformsConfigManager, + mockAuditor + ); + TransformServices transformServices = new TransformServices( + transformsConfigManager, + transformCheckpointService, + mockAuditor, + mock(SchedulerEngine.class) + ); + + ClusterSettings cSettings = new ClusterSettings(Settings.EMPTY, Collections.singleton(Transform.NUM_FAILURE_RETRIES_SETTING)); + ClusterService clusterService = mock(ClusterService.class); + when(clusterService.getClusterSettings()).thenReturn(cSettings); + when(clusterService.state()).thenReturn(TransformInternalIndexTests.STATE_WITH_LATEST_VERSIONED_INDEX_TEMPLATE); + + return new TransformPersistentTasksExecutor( + client, + transformServices, + mock(ThreadPool.class), + clusterService, + Settings.EMPTY, + new IndexNameExpressionResolver() + ); + } } diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformTaskTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformTaskTests.java index c2bd7352f66e6..418df16602899 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformTaskTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformTaskTests.java @@ -96,7 +96,7 @@ public void testStopOnFailedTaskWithStoppedIndexer() { "some_type", "some_action", TaskId.EMPTY_TASK_ID, - new TransformTaskParams(transformConfig.getId(), Version.CURRENT, TimeValue.timeValueSeconds(10)), + new TransformTaskParams(transformConfig.getId(), Version.CURRENT, TimeValue.timeValueSeconds(10), false), transformState, mock(SchedulerEngine.class), auditor, @@ -176,7 +176,7 @@ public void testStopOnFailedTaskWithoutIndexer() { "some_type", "some_action", TaskId.EMPTY_TASK_ID, - new TransformTaskParams(transformConfig.getId(), Version.CURRENT, TimeValue.timeValueSeconds(10)), + new TransformTaskParams(transformConfig.getId(), Version.CURRENT, TimeValue.timeValueSeconds(10), false), transformState, mock(SchedulerEngine.class), auditor, diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherIndexingListener.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherIndexingListener.java index 20aaf9714171b..d138b4b90da75 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherIndexingListener.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherIndexingListener.java @@ -97,11 +97,16 @@ void setConfiguration(Configuration configuration) { * * @param shardId The shard id object of the document being processed * @param operation The index operation - * @return The index operation + * @param result The result of the operation */ @Override - public Engine.Index preIndex(ShardId shardId, Engine.Index operation) { + public void postIndex(ShardId shardId, Engine.Index operation, Engine.IndexResult result) { if (isWatchDocument(shardId.getIndexName())) { + if (result.getResultType() == Engine.Result.Type.FAILURE) { + postIndex(shardId, operation, result.getFailure()); + return; + } + ZonedDateTime now = Instant.ofEpochMilli(clock.millis()).atZone(ZoneOffset.UTC); try { Watch watch = parser.parseWithSecrets(operation.id(), true, operation.source(), now, XContentType.JSON, @@ -109,8 +114,8 @@ public Engine.Index preIndex(ShardId shardId, Engine.Index operation) { ShardAllocationConfiguration 
shardAllocationConfiguration = configuration.localShards.get(shardId); if (shardAllocationConfiguration == null) { logger.debug("no distributed watch execution info found for watch [{}] on shard [{}], got configuration for {}", - watch.id(), shardId, configuration.localShards.keySet()); - return operation; + watch.id(), shardId, configuration.localShards.keySet()); + return; } boolean shouldBeTriggered = shardAllocationConfiguration.shouldBeTriggered(watch.id()); @@ -128,32 +133,12 @@ public Engine.Index preIndex(ShardId shardId, Engine.Index operation) { } catch (IOException e) { throw new ElasticsearchParseException("Could not parse watch with id [{}]", e, operation.id()); } - } - - return operation; } /** - * In case of a document related failure (for example version conflict), then clean up resources for a watch - * in the trigger service. - */ - @Override - public void postIndex(ShardId shardId, Engine.Index index, Engine.IndexResult result) { - if (result.getResultType() == Engine.Result.Type.FAILURE) { - assert result.getFailure() != null; - postIndex(shardId, index, result.getFailure()); - } - } - - /** - * In case of an engine related error, we have to ensure that the triggerservice does not leave anything behind - * - * TODO: If the configuration changes between preindex and postindex methods and we add a - * watch, that could not be indexed - * TODO: this watch might not be deleted from the triggerservice. Are we willing to accept this? - * TODO: This could be circumvented by using a threadlocal in preIndex(), that contains the - * watch and is cleared afterwards + * In case of an engine related error, we just log that we failed to add the watch to the trigger service. + * No need to interact with the trigger service. * * @param shardId The shard id object of the document being processed * @param index The index operation @@ -162,8 +147,7 @@ public void postIndex(ShardId shardId, Engine.Index index, Engine.IndexResult re @Override public void postIndex(ShardId shardId, Engine.Index index, Exception ex) { if (isWatchDocument(shardId.getIndexName())) { - logger.debug(() -> new ParameterizedMessage("removing watch [{}] from trigger", index.id()), ex); - triggerService.remove(index.id()); + logger.debug(() -> new ParameterizedMessage("failed to add watch [{}] to trigger service", index.id()), ex); } } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherIndexingListenerTests.java index c4baea3e9f61f..0d8af470df056 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherIndexingListenerTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherIndexingListenerTests.java @@ -113,18 +113,20 @@ public void testPreIndexCheckActive() throws Exception { verifyZeroInteractions(parser); } - public void testPreIndex() throws Exception { + public void testPostIndex() throws Exception { when(operation.id()).thenReturn(randomAlphaOfLength(10)); when(operation.source()).thenReturn(BytesArray.EMPTY); when(shardId.getIndexName()).thenReturn(Watch.INDEX); + List<Engine.Result.Type> types = new ArrayList<>(List.of(Engine.Result.Type.values())); + types.remove(Engine.Result.Type.FAILURE); + when(result.getResultType()).thenReturn(randomFrom(types)); boolean watchActive = randomBoolean(); boolean isNewWatch = randomBoolean(); Watch watch = mockWatch("_id", watchActive, isNewWatch);
when(parser.parseWithSecrets(anyObject(), eq(true), anyObject(), anyObject(), anyObject(), anyLong(), anyLong())).thenReturn(watch); - Engine.Index returnedOperation = listener.preIndex(shardId, operation); - assertThat(returnedOperation, is(operation)); + listener.postIndex(shardId, operation, result); ZonedDateTime now = DateUtils.nowWithMillisResolution(clock); verify(parser).parseWithSecrets(eq(operation.id()), eq(true), eq(BytesArray.EMPTY), eq(now), anyObject(), anyLong(), anyLong()); @@ -139,12 +141,13 @@ // this test emulates an index with 10 shards, and ensures that triggering only happens on a // single shard - public void testPreIndexWatchGetsOnlyTriggeredOnceAcrossAllShards() throws Exception { + public void testPostIndexWatchGetsOnlyTriggeredOnceAcrossAllShards() throws Exception { String id = randomAlphaOfLength(10); int totalShardCount = randomIntBetween(1, 10); boolean watchActive = randomBoolean(); boolean isNewWatch = randomBoolean(); Watch watch = mockWatch(id, watchActive, isNewWatch); + when(result.getResultType()).thenReturn(Engine.Result.Type.SUCCESS); when(shardId.getIndexName()).thenReturn(Watch.INDEX); when(parser.parseWithSecrets(anyObject(), eq(true), anyObject(), anyObject(), anyObject(), anyLong(), anyLong())).thenReturn(watch); @@ -154,7 +157,7 @@ public void testPreIndexWatchGetsOnlyTriggeredOnceAcrossAllShards() throws Excep localShards.put(shardId, new ShardAllocationConfiguration(idx, totalShardCount, Collections.emptyList())); Configuration configuration = new Configuration(Watch.INDEX, localShards); listener.setConfiguration(configuration); - listener.preIndex(shardId, operation); + listener.postIndex(shardId, operation, result); } // no matter how many shards we had, this should have been only called once @@ -186,16 +189,17 @@ private Watch mockWatch(String id, boolean active, boolean isNewWatch) { return watch; } - public void testPreIndexCheckParsingException() throws Exception { + public void testPostIndexCheckParsingException() throws Exception { String id = randomAlphaOfLength(10); when(operation.id()).thenReturn(id); when(operation.source()).thenReturn(BytesArray.EMPTY); when(shardId.getIndexName()).thenReturn(Watch.INDEX); when(parser.parseWithSecrets(anyObject(), eq(true), anyObject(), anyObject(), anyObject(), anyLong(), anyLong())) .thenThrow(new IOException("self thrown")); + when(result.getResultType()).thenReturn(Engine.Result.Type.SUCCESS); ElasticsearchParseException exc = expectThrows(ElasticsearchParseException.class, - () -> listener.preIndex(shardId, operation)); + () -> listener.postIndex(shardId, operation, result)); assertThat(exc.getMessage(), containsString("Could not parse watch")); assertThat(exc.getMessage(), containsString(id)); } @@ -206,19 +210,6 @@ public void testPostIndexRemoveTriggerOnDocumentRelatedException() throws Except when(result.getFailure()).thenReturn(new RuntimeException()); when(shardId.getIndexName()).thenReturn(Watch.INDEX); - listener.postIndex(shardId, operation, result); - verify(triggerService).remove(eq("_id")); - } - - public void testPostIndexRemoveTriggerOnDocumentRelatedException_ignoreOtherEngineResultTypes() throws Exception { - List<Engine.Result.Type> types = new ArrayList<>(List.of(Engine.Result.Type.values())); - types.remove(Engine.Result.Type.FAILURE); - - when(operation.id()).thenReturn("_id"); - when(result.getResultType()).thenReturn(randomFrom(types)); - when(result.getFailure()).thenReturn(new RuntimeException()); -
when(shardId.getIndexName()).thenReturn(Watch.INDEX); - listener.postIndex(shardId, operation, result); verifyZeroInteractions(triggerService); } @@ -238,7 +229,7 @@ public void testPostIndexRemoveTriggerOnEngineLevelException() throws Exception when(shardId.getIndexName()).thenReturn(Watch.INDEX); listener.postIndex(shardId, operation, new ElasticsearchParseException("whatever")); - verify(triggerService).remove(eq("_id")); + verifyZeroInteractions(triggerService); } public void testPostIndexRemoveTriggerOnEngineLevelException_ignoreNonWatcherDocument() throws Exception { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookHttpsIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookHttpsIntegrationTests.java index 1deced67b3d23..8054bdbad3380 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookHttpsIntegrationTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookHttpsIntegrationTests.java @@ -72,7 +72,9 @@ public void startWebservice() throws Exception { @After public void stopWebservice() throws Exception { - webServer.close(); + if (webServer != null) { + webServer.close(); + } } public void testHttps() throws Exception {
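Note on the WatcherIndexingListener hunks above: watch registration moves from preIndex to postIndex, so a watch only reaches the trigger service once the engine has reported the outcome of the write, and the old failure-path remove() becomes unnecessary. A minimal sketch of that pattern follows; the callbacks are the same IndexingOperationListener hooks the diff overrides, while the listener class name and comment wording are illustrative, not part of the patch:

import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.shard.IndexingOperationListener;
import org.elasticsearch.index.shard.ShardId;

public class RegisterOnSuccessListener implements IndexingOperationListener {

    @Override
    public void postIndex(ShardId shardId, Engine.Index operation, Engine.IndexResult result) {
        // Document-level failures (e.g. a version conflict) arrive as a FAILURE
        // result rather than a thrown exception; route them to the failure hook.
        if (result.getResultType() == Engine.Result.Type.FAILURE) {
            postIndex(shardId, operation, result.getFailure());
            return;
        }
        // Success path: the document is persisted, so derived state (here, a
        // trigger registration) can be created without needing a rollback later.
    }

    @Override
    public void postIndex(ShardId shardId, Engine.Index index, Exception ex) {
        // Engine-level failure: nothing was registered for this document yet,
        // so logging suffices and there is no state to clean up.
    }
}

Because registration now happens strictly after the write, the deleted TODOs about the configuration changing between preIndex and postIndex no longer apply, and the tests above can assert verifyZeroInteractions(triggerService) on every failure path.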