diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java
index d5db46e313e7e..6c1ea558dee2a 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java
@@ -75,6 +75,8 @@ public ElasticsearchCluster(String path, String clusterName, Project project, Fi
                 services, artifactsExtractDir, workingDirBase
             )
         );
+        // configure the cluster name eagerly so nodes know about it
+        this.nodes.all((node) -> node.defaultConfig.put("cluster.name", safeName(clusterName)));
 
         addWaitForClusterHealth();
     }
@@ -217,7 +219,6 @@ public void setJavaHome(File javaHome) {
     public void start() {
         String nodeNames = nodes.stream().map(ElasticsearchNode::getName).collect(Collectors.joining(","));
         for (ElasticsearchNode node : nodes) {
-            node.defaultConfig.put("cluster.name", safeName(clusterName));
             if (Version.fromString(node.getVersion()).getMajor() >= 7) {
                 node.defaultConfig.put("cluster.initial_master_nodes", "[" + nodeNames + "]");
                 node.defaultConfig.put("discovery.seed_providers", "file");
@@ -328,7 +329,8 @@ private void addWaitForClusterHealth() {
                 nodes.size()
             );
             if (httpSslEnabled) {
-                wait.setCertificateAuthorities(getFirstNode().getHttpCertificateAuthoritiesFile());
+
+                getFirstNode().configureHttpWait(wait);
             }
             List<Map<String, String>> credentials = getFirstNode().getCredentials();
             if (getFirstNode().getCredentials().isEmpty() == false) {
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java
index fda41467c767b..f838d3fd2ac57 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java
@@ -23,6 +23,7 @@
 import org.elasticsearch.gradle.FileSupplier;
 import org.elasticsearch.gradle.OS;
 import org.elasticsearch.gradle.Version;
+import org.elasticsearch.gradle.http.WaitForHttpResource;
 import org.gradle.api.logging.Logger;
 import org.gradle.api.logging.Logging;
 
@@ -581,7 +582,11 @@ public List<String> getAllTransportPortURI() {
     }
 
     public File getServerLog() {
-        return confPathLogs.resolve(safeName(getName()).replaceAll("-[0-9]+$", "") + "_server.json").toFile();
+        return confPathLogs.resolve(defaultConfig.get("cluster.name") + "_server.json").toFile();
+    }
+
+    public File getAuditLog() {
+        return confPathLogs.resolve(defaultConfig.get("cluster.name") + "_audit.json").toFile();
     }
 
     @Override
@@ -880,12 +885,32 @@ public boolean isHttpSslEnabled() {
         );
     }
 
-    public File getHttpCertificateAuthoritiesFile() {
-        if (settings.containsKey("xpack.security.http.ssl.certificate_authorities") == false) {
-            throw new TestClustersException("Can't get certificates authority file, not configured for " + this);
+    void configureHttpWait(WaitForHttpResource wait) {
+        if (settings.containsKey("xpack.security.http.ssl.certificate_authorities")) {
+            wait.setCertificateAuthorities(
+                getConfigDir()
+                    .resolve(settings.get("xpack.security.http.ssl.certificate_authorities").get().toString())
+                    .toFile()
+            );
+        }
+        if (settings.containsKey("xpack.security.http.ssl.certificate")) {
+            wait.setCertificateAuthorities(
+                getConfigDir()
+                    .resolve(settings.get("xpack.security.http.ssl.certificate").get().toString())
+                    .toFile()
+            );
+        }
+        if (settings.containsKey("xpack.security.http.ssl.keystore.path")) {
+            wait.setTrustStoreFile(
+                getConfigDir()
+                    .resolve(settings.get("xpack.security.http.ssl.keystore.path").get().toString())
+                    .toFile()
+            );
+        }
+        if (keystoreSettings.containsKey("xpack.security.http.ssl.keystore.secure_password")) {
+            wait.setTrustStorePassword(
+                keystoreSettings.get("xpack.security.http.ssl.keystore.secure_password").get().toString()
+            );
         }
-        return getConfigDir()
-            .resolve(settings.get("xpack.security.http.ssl.certificate_authorities").get().toString())
-            .toFile();
     }
 }
diff --git a/client/rest-high-level/build.gradle b/client/rest-high-level/build.gradle
index 0d3b2e24fcb22..0e05eb567687a 100644
--- a/client/rest-high-level/build.gradle
+++ b/client/rest-high-level/build.gradle
@@ -16,6 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
+apply plugin: 'elasticsearch.testclusters'
 apply plugin: 'elasticsearch.build'
 apply plugin: 'elasticsearch.rest-test'
 apply plugin: 'nebula.maven-base-publish'
@@ -115,14 +116,15 @@ if (isEclipse) {
 File nodeCert = file("./testnode.crt")
 File nodeTrustStore = file("./testnode.jks")
 
-integTestRunner {
+integTest.runner {
     systemProperty 'tests.rest.cluster.username', System.getProperty('tests.rest.cluster.username', 'test_user')
     systemProperty 'tests.rest.cluster.password', System.getProperty('tests.rest.cluster.password', 'test-password')
 }
 
-integTestCluster {
+testClusters.integTest {
+    distribution = "DEFAULT"
     systemProperty 'es.scripting.update.ctx_in_params', 'false'
-    setting 'reindex.remote.whitelist', ['"[::1]:*"', '"127.0.0.1:*"']
+    setting 'reindex.remote.whitelist', '[ "[::1]:*", "127.0.0.1:*" ]'
     setting 'xpack.license.self_generated.type', 'trial'
     setting 'xpack.security.enabled', 'true'
     setting 'xpack.security.authc.token.enabled', 'true'
@@ -131,22 +133,10 @@ integTestCluster {
     setting 'xpack.security.http.ssl.certificate_authorities', 'testnode.crt'
     setting 'xpack.security.transport.ssl.truststore.path', 'testnode.jks'
     setting 'indices.lifecycle.poll_interval', '1000ms'
-    keystoreSetting 'xpack.security.transport.ssl.truststore.secure_password', 'testnode'
-    setupCommand 'setupDummyUser',
-                 'bin/elasticsearch-users',
-                 'useradd', System.getProperty('tests.rest.cluster.username', 'test_user'),
-                 '-p', System.getProperty('tests.rest.cluster.password', 'test-password'),
-                 '-r', 'superuser'
+    keystore 'xpack.security.transport.ssl.truststore.secure_password', 'testnode'
+    user username: System.getProperty('tests.rest.cluster.username', 'test_user'),
+        password: System.getProperty('tests.rest.cluster.password', 'test-password')
+
     extraConfigFile nodeCert.name, nodeCert
     extraConfigFile nodeTrustStore.name, nodeTrustStore
-    waitCondition = { node, ant ->
-        File tmpFile = new File(node.cwd, 'wait.success')
-        ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow",
-            dest: tmpFile.toString(),
-            username: System.getProperty('tests.rest.cluster.username', 'test_user'),
-            password: System.getProperty('tests.rest.cluster.password', 'test-password'),
-            ignoreerrors: true,
-            retries: 10)
-        return tmpFile.exists()
-    }
 }
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DestConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DestConfig.java
index 5e81a368f66b4..f808fa867209a 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DestConfig.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DestConfig.java
@@ -28,6 +28,7 @@
 import java.util.Objects;
 
 import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
 
 /**
  * Configuration containing the destination index for the {@link DataFrameTransformConfig}
@@ -35,29 +36,40 @@ public class DestConfig implements ToXContentObject {
 
     public static final ParseField INDEX = new ParseField("index");
+    public static final ParseField PIPELINE = new ParseField("pipeline");
 
     public static final ConstructingObjectParser<DestConfig, Void> PARSER = new ConstructingObjectParser<>("data_frame_config_dest",
         true,
-        args -> new DestConfig((String)args[0]));
+        args -> new DestConfig((String)args[0], (String)args[1]));
 
     static {
         PARSER.declareString(constructorArg(), INDEX);
+        PARSER.declareString(optionalConstructorArg(), PIPELINE);
     }
 
     private final String index;
+    private final String pipeline;
 
-    public DestConfig(String index) {
+    DestConfig(String index, String pipeline) {
         this.index = Objects.requireNonNull(index, INDEX.getPreferredName());
+        this.pipeline = pipeline;
     }
 
     public String getIndex() {
         return index;
     }
 
+    public String getPipeline() {
+        return pipeline;
+    }
+
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject();
         builder.field(INDEX.getPreferredName(), index);
+        if (pipeline != null) {
+            builder.field(PIPELINE.getPreferredName(), pipeline);
+        }
         builder.endObject();
         return builder;
     }
@@ -72,11 +84,45 @@ public boolean equals(Object other) {
         }
 
         DestConfig that = (DestConfig) other;
-        return Objects.equals(index, that.index);
+        return Objects.equals(index, that.index) &&
+            Objects.equals(pipeline, that.pipeline);
     }
 
     @Override
     public int hashCode(){
-        return Objects.hash(index);
+        return Objects.hash(index, pipeline);
+    }
+
+    public static Builder builder() {
+        return new Builder();
+    }
+
+    public static class Builder {
+        private String index;
+        private String pipeline;
+
+        /**
+         * Sets the index to which to write the data
+         * @param index where to write the data
+         * @return The {@link Builder} with index set
+         */
+        public Builder setIndex(String index) {
+            this.index = Objects.requireNonNull(index, INDEX.getPreferredName());
+            return this;
+        }
+
+        /**
+         * Sets the pipeline through which the indexed documents should be processed
+         * @param pipeline The pipeline ID
+         * @return The {@link Builder} with pipeline set
+         */
+        public Builder setPipeline(String pipeline) {
+            this.pipeline = pipeline;
+            return this;
+        }
+
+        public DestConfig build() {
+            return new DestConfig(index, pipeline);
+        }
     }
 }
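Since the constructor is now package-private, the builder is the public entry point. A quick round-trip for orientation; the expected JSON follows directly from `toXContent` above (`Strings.toString` is the standard ES helper for rendering a `ToXContentObject`):

["source","java"]
--------------------------------------------------
import org.elasticsearch.client.dataframe.transforms.DestConfig;
import org.elasticsearch.common.Strings;

DestConfig dest = DestConfig.builder()
    .setIndex("pivot-destination") // required; setIndex rejects null
    .setPipeline("my-pipeline")    // optional; omitted from the JSON when null
    .build();
String json = Strings.toString(dest);
// -> {"index":"pivot-destination","pipeline":"my-pipeline"}
--------------------------------------------------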
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java
index 5e00dfb8ed3c9..8489d14e1017b 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java
@@ -307,7 +307,7 @@ private DataFrameTransformConfig validDataFrameTransformConfig(String id, String
         aggBuilder.addAggregator(AggregationBuilders.avg("avg_rating").field("stars"));
         PivotConfig pivotConfig = PivotConfig.builder().setGroups(groupConfig).setAggregations(aggBuilder).build();
 
-        DestConfig destConfig = (destination != null) ? new DestConfig(destination) : null;
+        DestConfig destConfig = (destination != null) ? DestConfig.builder().setIndex(destination).build() : null;
 
         return DataFrameTransformConfig.builder()
             .setId(id)
@@ -318,6 +318,7 @@ private DataFrameTransformConfig validDataFrameTransformConfig(String id, String
             .build();
     }
 
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/43324")
     public void testGetStats() throws Exception {
         String sourceIndex = "transform-source";
         createIndex(sourceIndex);
@@ -333,7 +334,7 @@ public void testGetStats() throws Exception {
         DataFrameTransformConfig transform = DataFrameTransformConfig.builder()
             .setId(id)
             .setSource(SourceConfig.builder().setIndex(sourceIndex).setQuery(new MatchAllQueryBuilder()).build())
-            .setDest(new DestConfig("pivot-dest"))
+            .setDest(DestConfig.builder().setIndex("pivot-dest").build())
             .setPivotConfig(pivotConfig)
             .setDescription("transform for testing stats")
             .build();
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java
index 25e610d67814f..8ef28733f2e12 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java
@@ -1286,7 +1286,7 @@ public void testGetFilters() throws Exception {
         GetFiltersResponse getFiltersResponse =  execute(getFiltersRequest,
             machineLearningClient::getFilter, machineLearningClient::getFilterAsync);
 
-        assertThat(getFiltersResponse.count(), equalTo(2L));
+        assertThat(getFiltersResponse.count(), equalTo(3L));
         assertThat(getFiltersResponse.filters().size(), equalTo(2));
         assertThat(getFiltersResponse.filters().stream().map(MlFilter::getId).collect(Collectors.toList()),
             containsInAnyOrder("get-filter-test-2", "get-filter-test-3"));
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DestConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DestConfigTests.java
index f2950b64cf7c9..0dc8f99d7631b 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DestConfigTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DestConfigTests.java
@@ -27,7 +27,8 @@ public class DestConfigTests extends AbstractXContentTestCase<DestConfig> {
 
     public static DestConfig randomDestConfig() {
-        return new DestConfig(randomAlphaOfLength(10));
+        return new DestConfig(randomAlphaOfLength(10),
+            randomBoolean() ? null : randomAlphaOfLength(10));
     }
 
     @Override
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java
index f0dfda1d58902..6604e97ed5b97 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java
@@ -125,6 +125,11 @@ public void testPutDataFrameTransform() throws IOException, InterruptedException
             .setIndex("source-index")
             .setQueryConfig(queryConfig).build();
         // end::put-data-frame-transform-source-config
+        // tag::put-data-frame-transform-dest-config
+        DestConfig destConfig = DestConfig.builder()
+            .setIndex("pivot-destination")
+            .setPipeline("my-pipeline").build();
+        // end::put-data-frame-transform-dest-config
         // tag::put-data-frame-transform-group-config
         GroupConfig groupConfig = GroupConfig.builder()
             .groupBy("reviewer", // <1>
@@ -149,7 +154,7 @@ public void testPutDataFrameTransform() throws IOException, InterruptedException
             .builder()
             .setId("reviewer-avg-rating") // <1>
             .setSource(sourceConfig) // <2>
-            .setDest(new DestConfig("pivot-destination")) // <3>
+            .setDest(destConfig) // <3>
             .setPivotConfig(pivotConfig) // <4>
             .setDescription("This is my test transform") // <5>
             .build();
@@ -222,7 +227,7 @@ public void testStartStop() throws IOException, InterruptedException {
         DataFrameTransformConfig transformConfig = DataFrameTransformConfig.builder()
             .setId("mega-transform")
             .setSource(SourceConfig.builder().setIndex("source-data").setQueryConfig(queryConfig).build())
-            .setDest(new DestConfig("pivot-dest"))
+            .setDest(DestConfig.builder().setIndex("pivot-dest").build())
             .setPivotConfig(pivotConfig)
             .build();
 
@@ -344,7 +349,7 @@ public void testDeleteDataFrameTransform() throws IOException, InterruptedExcept
                 .setIndex("source-data")
                 .setQuery(new MatchAllQueryBuilder())
                 .build())
-            .setDest(new DestConfig("pivot-dest"))
+            .setDest(DestConfig.builder().setIndex("pivot-dest").build())
             .setPivotConfig(pivotConfig)
             .build();
         DataFrameTransformConfig transformConfig2 = DataFrameTransformConfig.builder()
@@ -353,7 +358,7 @@ public void testDeleteDataFrameTransform() throws IOException, InterruptedExcept
                 .setIndex("source-data")
                 .setQuery(new MatchAllQueryBuilder())
                 .build())
-            .setDest(new DestConfig("pivot-dest2"))
+            .setDest(DestConfig.builder().setIndex("pivot-dest2").build())
             .setPivotConfig(pivotConfig)
             .build();
 
@@ -488,7 +493,7 @@ public void testGetStats() throws IOException, InterruptedException {
                 .setIndex("source-data")
                 .setQuery(new MatchAllQueryBuilder())
                 .build())
-            .setDest(new DestConfig("pivot-dest"))
+            .setDest(DestConfig.builder().setIndex("pivot-dest").build())
             .setPivotConfig(pivotConfig)
             .build();
         client.dataFrame().putDataFrameTransform(new PutDataFrameTransformRequest(transformConfig), RequestOptions.DEFAULT);
@@ -574,7 +579,7 @@ public void testGetDataFrameTransform() throws IOException, InterruptedException
                 .setIndex("source-data")
                 .setQuery(new MatchAllQueryBuilder())
                 .build())
-            .setDest(new DestConfig("pivot-dest"))
+            .setDest(DestConfig.builder().setIndex("pivot-dest").build())
             .setPivotConfig(pivotConfig)
             .build();
 
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java
index 5a6016175bae2..b8149c99a5473 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java
@@ -452,7 +452,7 @@ public void testSnapshotVerifyRepository() throws IOException {
         List<VerifyRepositoryResponse.NodeView> repositoryMetaDataResponse = response.getNodes();
         // end::verify-repository-response
         assertThat(1, equalTo(repositoryMetaDataResponse.size()));
-        assertThat("node-0", equalTo(repositoryMetaDataResponse.get(0).getName()));
+        assertThat("integTest-0", equalTo(repositoryMetaDataResponse.get(0).getName()));
     }
 
     public void testSnapshotVerifyRepositoryAsync() throws InterruptedException {
diff --git a/docs/java-rest/high-level/dataframe/put_data_frame.asciidoc b/docs/java-rest/high-level/dataframe/put_data_frame.asciidoc
index 567449c9c25b1..19c7fe443dbcd 100644
--- a/docs/java-rest/high-level/dataframe/put_data_frame.asciidoc
+++ b/docs/java-rest/high-level/dataframe/put_data_frame.asciidoc
@@ -33,7 +33,7 @@ include-tagged::{doc-tests-file}[{api}-config]
 --------------------------------------------------
 <1> The {dataframe-transform} ID
 <2> The source indices and query from which to gather data
-<3> The destination index
+<3> The destination index and optional pipeline
 <4> The PivotConfig
 <5> Optional free text description of the transform
 
@@ -49,6 +49,16 @@ If query is not set, a `match_all` query is used by default.
 include-tagged::{doc-tests-file}[{api}-source-config]
 --------------------------------------------------
 
+==== DestConfig
+
+The index where to write the data and the optional pipeline
+through which the docs should be indexed.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests-file}[{api}-dest-config]
+--------------------------------------------------
+
 ===== QueryConfig
 
 The query with which to select data from the source.
diff --git a/docs/reference/data-frames/apis/put-transform.asciidoc b/docs/reference/data-frames/apis/put-transform.asciidoc
index 428321aa0305f..10000126ef002 100644
--- a/docs/reference/data-frames/apis/put-transform.asciidoc
+++ b/docs/reference/data-frames/apis/put-transform.asciidoc
@@ -38,7 +38,8 @@ IMPORTANT: You must use {kib} or this API to create a {dataframe-transform}.
 `source` (required):: (object) The source configuration, consisting of `index` and optionally
 a `query`.
 
-`dest` (required):: (object) The destination configuration, consisting of `index`.
+`dest` (required):: (object) The destination configuration, consisting of `index` and optionally a
+`pipeline` id.
 
 `pivot`:: (object) Defines the pivot function `group by` fields and the aggregation to
 reduce the data. See <<data-frame-transform-pivot,data frame transform pivot objects>>.
@@ -76,7 +77,8 @@ PUT _data_frame/transforms/ecommerce_transform
     }
   },
   "dest": {
-    "index": "kibana_sample_data_ecommerce_transform"
+    "index": "kibana_sample_data_ecommerce_transform",
+    "pipeline": "add_timestamp_pipeline"
   },
   "pivot": {
     "group_by": {
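The snippet above references an `add_timestamp_pipeline` that must exist before the transform writes to its destination. A hedged sketch of creating such a pipeline with the high-level REST client; the processor body is illustrative, not part of this change:

["source","java"]
--------------------------------------------------
import org.elasticsearch.action.ingest.PutPipelineRequest;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.xcontent.XContentType;

// `client` is an existing RestHighLevelClient
String body = "{\"processors\":[{\"set\":{\"field\":\"timestamp\",\"value\":\"{{_ingest.timestamp}}\"}}]}";
client.ingest().putPipeline(
    new PutPipelineRequest("add_timestamp_pipeline", new BytesArray(body), XContentType.JSON),
    RequestOptions.DEFAULT);
--------------------------------------------------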
For more details see, http://dev.maxmind.c
 
 The `geoip` processor can run with other GeoIP2 databases from Maxmind. The files must be copied into the `ingest-geoip` config
 directory, and the `database_file` option should be used to specify the filename of the custom database. Custom database files must be stored
-uncompressed. The `ingest-geoip` config directory is located at `$ES_HOME/config/ingest-geoip`.
+uncompressed. The `ingest-geoip` config directory is located at `$ES_CONFIG/ingest-geoip`.
 
 [[using-ingest-geoip]]
 ==== Using the `geoip` Processor in a Pipeline
diff --git a/docs/reference/mapping/params/coerce.asciidoc b/docs/reference/mapping/params/coerce.asciidoc
index 55f31262351fd..be5b2a648c600 100644
--- a/docs/reference/mapping/params/coerce.asciidoc
+++ b/docs/reference/mapping/params/coerce.asciidoc
@@ -47,8 +47,7 @@ PUT my_index/_doc/2
 <1> The `number_one` field will contain the integer `10`.
 <2> This document will be rejected because coercion is disabled.
 
-TIP: The `coerce` setting is allowed to have different settings for fields of
-the same name in the same index. Its value can be updated on existing fields
+TIP: The `coerce` setting value can be updated on existing fields
 using the <<indices-put-mapping,PUT mapping API>>.
 
 [[coerce-setting]]
diff --git a/docs/reference/mapping/params/ignore-malformed.asciidoc b/docs/reference/mapping/params/ignore-malformed.asciidoc
index 8c91bb48ee7cd..d84a7290eb7c9 100644
--- a/docs/reference/mapping/params/ignore-malformed.asciidoc
+++ b/docs/reference/mapping/params/ignore-malformed.asciidoc
@@ -46,8 +46,7 @@ PUT my_index/_doc/2
 <1> This document will have the `text` field indexed, but not the `number_one` field.
 <2> This document will be rejected because `number_two` does not allow malformed values.
 
-TIP: The `ignore_malformed` setting is allowed to have different settings for
-fields of the same name in the same index. Its value can be updated on
+TIP: The `ignore_malformed` setting value can be updated on
 existing fields using the <<indices-put-mapping,PUT mapping API>>.
 
diff --git a/docs/reference/mapping/params/multi-fields.asciidoc b/docs/reference/mapping/params/multi-fields.asciidoc
index ee1bc02c7fd8d..448f7fd2e81ec 100644
--- a/docs/reference/mapping/params/multi-fields.asciidoc
+++ b/docs/reference/mapping/params/multi-fields.asciidoc
@@ -60,8 +60,7 @@ GET my_index/_search
 
 NOTE: Multi-fields do not change the original `_source` field.
 
-TIP: The `fields` setting is allowed to have different settings for fields of
-the same name in the same index. New multi-fields can be added to existing
+TIP: New multi-fields can be added to existing
 fields using the <<indices-put-mapping,PUT mapping API>>.
 
 ==== Multi-fields with multiple analyzers
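The tips above all defer to the put-mapping API. For completeness, a sketch of adding a multi-field to an existing field with the high-level REST client; index and field names are illustrative:

["source","java"]
--------------------------------------------------
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.indices.PutMappingRequest;
import org.elasticsearch.common.xcontent.XContentType;

// `client` is an existing RestHighLevelClient; adds a `city.raw` keyword multi-field
PutMappingRequest request = new PutMappingRequest("my_index").source(
    "{\"properties\":{\"city\":{\"type\":\"text\"," +
        "\"fields\":{\"raw\":{\"type\":\"keyword\"}}}}}",
    XContentType.JSON);
client.indices().putMapping(request, RequestOptions.DEFAULT);
--------------------------------------------------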
diff --git a/docs/reference/mapping/params/norms.asciidoc b/docs/reference/mapping/params/norms.asciidoc
index 8a7be4baef8c5..6a250d296a2dc 100644
--- a/docs/reference/mapping/params/norms.asciidoc
+++ b/docs/reference/mapping/params/norms.asciidoc
@@ -11,11 +11,10 @@ don't need scoring on a specific field, you should disable norms on that field.
 In particular, this is the case for fields that are used solely for filtering
 or aggregations.
 
-TIP: The `norms` setting must have the same setting for fields of the
-same name in the same index. Norms can be disabled on existing fields using
+TIP: Norms can be disabled on existing fields using
 the <<indices-put-mapping,PUT mapping API>>.
 
-Norms can be disabled (but not reenabled) after the fact, using the
+Norms can be disabled (but not reenabled after the fact), using the
 <<indices-put-mapping,PUT mapping API>> like so:
 
 [source,js]
diff --git a/docs/reference/query-dsl/intervals-query.asciidoc b/docs/reference/query-dsl/intervals-query.asciidoc
index f5788783f7e63..3049cb363173a 100644
--- a/docs/reference/query-dsl/intervals-query.asciidoc
+++ b/docs/reference/query-dsl/intervals-query.asciidoc
@@ -40,7 +40,6 @@ POST _search
                 }
               ]
             },
-            "boost" : 2.0,
             "_name" : "favourite_food"
         }
     }
@@ -298,4 +297,4 @@ POST _search
 }
 }
 --------------------------------------------------
-// CONSOLE
\ No newline at end of file
+// CONSOLE
diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java
index 283ead0c91872..7eaa989b88a32 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java
@@ -37,7 +36,6 @@
 import org.elasticsearch.ingest.IngestTestPlugin;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.tasks.TaskInfo;
-import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.hamcrest.Matcher;
 import org.junit.Before;
 
@@ -60,7 +59,6 @@
  * different cancellation places - that is the responsibility of AsyncBulkByScrollActionTests which have more precise control to
  * simulate failures but does not exercise important portion of the stack like transport and task management.
 */
-@TestLogging("org.elasticsearch.index.reindex:DEBUG,org.elasticsearch.action.bulk:DEBUG")
 public class CancelTests extends ReindexTestCase {
 
     protected static final String INDEX = "reindex-cancel-index";
diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java
index 7783705b04f7f..4b9a69cfce1c8 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java
@@ -21,7 +21,6 @@
 
 import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
 import org.elasticsearch.action.index.IndexRequestBuilder;
-import org.elasticsearch.test.junit.annotations.TestLogging;
 
 import java.util.ArrayList;
 import java.util.List;
@@ -38,7 +37,6 @@
 /**
  * Tests failure capturing and abort-on-failure behavior of reindex.
 */
-@TestLogging("_root:DEBUG")
 public class ReindexFailureTests extends ReindexTestCase {
     public void testFailuresCauseAbortDefault() throws Exception {
         /*
diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RethrottleTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RethrottleTests.java
index 58067cd2cdbbf..4420fe450bd4d 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RethrottleTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RethrottleTests.java
@@ -27,7 +27,6 @@
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.tasks.TaskId;
-import org.elasticsearch.test.junit.annotations.TestLogging;
 
 import java.util.ArrayList;
 import java.util.List;
@@ -50,7 +49,6 @@
  * too but this is the only place that tests running against multiple nodes so it is the only integration tests that checks for
  * serialization.
 */
-@TestLogging("org.elasticsearch.index.reindex:TRACE,org.elasticsearch.action.bulk:TRACE,org.elasticsearch.search.SearchService:TRACE")
 public class RethrottleTests extends ReindexTestCase {
 
     public void testReindex() throws Exception {
diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryBasicTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryBasicTests.java
index 656c975fa6cbc..f55ee1e802e6b 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryBasicTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryBasicTests.java
@@ -21,7 +21,6 @@
 
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.search.sort.SortOrder;
-import org.elasticsearch.test.junit.annotations.TestLogging;
 
 import java.util.ArrayList;
 import java.util.Collection;
@@ -34,7 +33,6 @@
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
 import static org.hamcrest.Matchers.hasSize;
 
-@TestLogging("org.elasticsearch.index.reindex:TRACE,org.elasticsearch.action.bulk:TRACE,org.elasticsearch.search.SearchService:TRACE")
 public class UpdateByQueryBasicTests extends ReindexTestCase {
     public void testBasics() throws Exception {
         indexRandom(true, client().prepareIndex("test", "test", "1").setSource("foo", "a"),
for closing", shardId); + if (request.isPhase1()) { + // in order to advance the global checkpoint to the maximum sequence number, the (persisted) local checkpoint needs to be + // advanced first, which, when using async translog syncing, does not automatically hold at the time where we have acquired + // all operation permits. Instead, this requires and explicit sync, which communicates the updated (persisted) local checkpoint + // to the primary (we call this phase1), and phase2 can then use the fact that the global checkpoint has moved to the maximum + // sequence number to pass the verifyShardBeforeIndexClosing check and create a safe commit where the maximum sequence number + // is equal to the global checkpoint. + indexShard.sync(); + } else { + indexShard.verifyShardBeforeIndexClosing(); + indexShard.flush(new FlushRequest().force(true).waitIfOngoing(true)); + logger.trace("{} shard is ready for closing", shardId); + } } @Override @@ -136,14 +147,22 @@ public static class ShardRequest extends ReplicationRequest { private final ClusterBlock clusterBlock; + private final boolean phase1; + ShardRequest(StreamInput in) throws IOException { super(in); clusterBlock = new ClusterBlock(in); + if (in.getVersion().onOrAfter(Version.V_7_3_0)) { + phase1 = in.readBoolean(); + } else { + phase1 = false; + } } - public ShardRequest(final ShardId shardId, final ClusterBlock clusterBlock, final TaskId parentTaskId) { + public ShardRequest(final ShardId shardId, final ClusterBlock clusterBlock, final boolean phase1, final TaskId parentTaskId) { super(shardId); this.clusterBlock = Objects.requireNonNull(clusterBlock); + this.phase1 = phase1; setParentTask(parentTaskId); } @@ -161,10 +180,17 @@ public void readFrom(final StreamInput in) { public void writeTo(final StreamOutput out) throws IOException { super.writeTo(out); clusterBlock.writeTo(out); + if (out.getVersion().onOrAfter(Version.V_7_3_0)) { + out.writeBoolean(phase1); + } } public ClusterBlock clusterBlock() { return clusterBlock; } + + public boolean isPhase1() { + return phase1; + } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java index c36d6092a7e49..97f769d18bbb1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -44,6 +44,7 @@ import org.elasticsearch.cluster.metadata.MetaDataIndexAliasesService; import org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.shard.DocsStats; @@ -54,6 +55,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Optional; import java.util.regex.Pattern; import java.util.stream.Collectors; @@ -233,7 +235,11 @@ static String generateRolloverIndexName(String sourceIndexName, IndexNameExpress } static Map evaluateConditions(final Collection> conditions, - final DocsStats docsStats, final IndexMetaData metaData) { + @Nullable final DocsStats docsStats, + @Nullable final IndexMetaData metaData) { + if (metaData == null) { + return conditions.stream().collect(Collectors.toMap(Condition::toString, cond -> false)); + 
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java
index c36d6092a7e49..97f769d18bbb1 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java
@@ -44,6 +44,7 @@
 import org.elasticsearch.cluster.metadata.MetaDataIndexAliasesService;
 import org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService;
 import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.index.shard.DocsStats;
@@ -54,6 +55,7 @@
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
+import java.util.Optional;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
 
@@ -233,7 +235,11 @@ static String generateRolloverIndexName(String sourceIndexName, IndexNameExpress
     }
 
     static Map<String, Boolean> evaluateConditions(final Collection<Condition<?>> conditions,
-                                                   final DocsStats docsStats, final IndexMetaData metaData) {
+                                                   @Nullable final DocsStats docsStats,
+                                                   @Nullable final IndexMetaData metaData) {
+        if (metaData == null) {
+            return conditions.stream().collect(Collectors.toMap(Condition::toString, cond -> false));
+        }
         final long numDocs = docsStats == null ? 0 : docsStats.getCount();
         final long indexSize = docsStats == null ? 0 : docsStats.getTotalSizeInBytes();
         final Condition.Stats stats = new Condition.Stats(numDocs, metaData.getCreationDate(), new ByteSizeValue(indexSize));
@@ -242,9 +248,18 @@ static Map<String, Boolean> evaluateConditions(final Collection<Condition<?>> co
             .collect(Collectors.toMap(result -> result.condition.toString(), result -> result.matched));
     }
 
-    static Map<String, Boolean> evaluateConditions(final Collection<Condition<?>> conditions, final IndexMetaData metaData,
-                                                   final IndicesStatsResponse statsResponse) {
-        return evaluateConditions(conditions, statsResponse.getIndex(metaData.getIndex().getName()).getPrimaries().getDocs(), metaData);
+    static Map<String, Boolean> evaluateConditions(final Collection<Condition<?>> conditions,
+                                                   @Nullable final IndexMetaData metaData,
+                                                   @Nullable final IndicesStatsResponse statsResponse) {
+        if (metaData == null) {
+            return conditions.stream().collect(Collectors.toMap(Condition::toString, cond -> false));
+        } else {
+            final DocsStats docsStats = Optional.ofNullable(statsResponse)
+                .map(stats -> stats.getIndex(metaData.getIndex().getName()))
+                .map(indexStats -> indexStats.getPrimaries().getDocs())
+                .orElse(null);
+            return evaluateConditions(conditions, docsStats, metaData);
+        }
     }
 
     static void validate(MetaData metaData, RolloverRequest request) {
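The null-metadata guard can be exercised directly; a same-package test sketch (condition values and assertions illustrative, and note that `evaluateConditions` is package-private), showing that a missing index now yields all-false matches instead of an NPE:

["source","java"]
--------------------------------------------------
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.everyItem;
import static org.hamcrest.Matchers.is;

import java.util.Arrays;
import java.util.List;
import java.util.Map;

// a rollover dry-run against an index that does not exist
List<Condition<?>> conditions = Arrays.asList(
    new MaxDocsCondition(100_000L),
    new MaxAgeCondition(TimeValue.timeValueDays(7)));
Map<String, Boolean> results =
    TransportRolloverAction.evaluateConditions(conditions, null, (IndicesStatsResponse) null);
assertThat(results.values(), everyItem(is(false)));
--------------------------------------------------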
diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java
index 7917d9c05078b..d328f06eb6895 100644
--- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java
+++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java
@@ -111,6 +111,7 @@ public void execute() throws Exception {
     private void handlePrimaryResult(final PrimaryResultT primaryResult) {
         this.primaryResult = primaryResult;
         primary.updateLocalCheckpointForShard(primary.routingEntry().allocationId().getId(), primary.localCheckpoint());
+        primary.updateGlobalCheckpointForShard(primary.routingEntry().allocationId().getId(), primary.globalCheckpoint());
         final ReplicaRequest replicaRequest = primaryResult.replicaRequest();
         if (replicaRequest != null) {
             if (logger.isTraceEnabled()) {
@@ -123,7 +124,7 @@ private void handlePrimaryResult(final PrimaryResultT primaryResult) {
             // is valid for this replication group. If we would sample in the reverse, the global checkpoint might be based on a subset
             // of the sampled replication group, and advanced further than what the given replication group would allow it to.
             // This would entail that some shards could learn about a global checkpoint that would be higher than its local checkpoint.
-            final long globalCheckpoint = primary.globalCheckpoint();
+            final long globalCheckpoint = primary.computedGlobalCheckpoint();
             // we have to capture the max_seq_no_of_updates after this request was completed on the primary to make sure the value of
             // max_seq_no_of_updates on replica when this request is executed is at least the value on the primary when it was executed
             // on.
@@ -341,16 +342,23 @@ public interface Primary<
         void updateGlobalCheckpointForShard(String allocationId, long globalCheckpoint);
 
         /**
-         * Returns the local checkpoint on the primary shard.
+         * Returns the persisted local checkpoint on the primary shard.
         *
         * @return the local checkpoint
        */
        long localCheckpoint();
 
         /**
-         * Returns the global checkpoint on the primary shard.
+         * Returns the global checkpoint computed on the primary shard.
         *
-         * @return the global checkpoint
+         * @return the computed global checkpoint
+         */
+        long computedGlobalCheckpoint();
+
+        /**
+         * Returns the persisted global checkpoint on the primary shard.
+         *
+         * @return the persisted global checkpoint
         */
        long globalCheckpoint();
 
@@ -419,16 +427,16 @@ void performOn(ShardRouting replica, RequestT replicaRequest,
     public interface ReplicaResponse {
 
         /**
-         * The local checkpoint for the shard.
+         * The persisted local checkpoint for the shard.
         *
-         * @return the local checkpoint
+         * @return the persisted local checkpoint
        **/
        long localCheckpoint();
 
         /**
-         * The global checkpoint for the shard.
+         * The persisted global checkpoint for the shard.
         *
-         * @return the global checkpoint
+         * @return the persisted global checkpoint
        **/
        long globalCheckpoint();
 
diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
index d19009433deb5..e338c6e5c329e 100644
--- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
+++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
@@ -526,7 +526,7 @@ public void onResponse(Releasable releasable) {
                 final ReplicaResult replicaResult = shardOperationOnReplica(replicaRequest.getRequest(), replica);
                 releasable.close(); // release shard operation lock before responding to caller
                 final TransportReplicationAction.ReplicaResponse response =
-                    new ReplicaResponse(replica.getLocalCheckpoint(), replica.getGlobalCheckpoint());
+                    new ReplicaResponse(replica.getLocalCheckpoint(), replica.getLastSyncedGlobalCheckpoint());
                 replicaResult.respond(new ResponseListener(response));
             } catch (final Exception e) {
                 Releasables.closeWhileHandlingException(releasable); // release shard operation lock before responding to caller
@@ -892,10 +892,6 @@ public void close() {
             operationLock.close();
         }
 
-        public long getLocalCheckpoint() {
-            return indexShard.getLocalCheckpoint();
-        }
-
         public ShardRouting routingEntry() {
             return indexShard.routingEntry();
         }
@@ -943,7 +939,12 @@ public long localCheckpoint() {
 
         @Override
         public long globalCheckpoint() {
-            return indexShard.getGlobalCheckpoint();
+            return indexShard.getLastSyncedGlobalCheckpoint();
+        }
+
+        @Override
+        public long computedGlobalCheckpoint() {
+            return indexShard.getLastKnownGlobalCheckpoint();
         }
 
         @Override
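The two global-checkpoint accessors wired up above differ only in durability. A short sketch of the relationship, assuming an `indexShard` in scope (both methods are the ones this diff calls):

["source","java"]
--------------------------------------------------
// the two flavors of global checkpoint referenced above, side by side
long computed  = indexShard.getLastKnownGlobalCheckpoint();  // freshly computed, in-memory
long persisted = indexShard.getLastSyncedGlobalCheckpoint(); // last value fsynced with the translog
assert persisted <= computed; // durability may lag the computation, never lead it
--------------------------------------------------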
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java
index ef4583e98e544..d37a134c0a77c 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java
@@ -389,11 +389,26 @@ private void sendVerifyShardBeforeCloseRequest(final IndexShardRoutingTable shar
         }
         final TaskId parentTaskId = new TaskId(clusterService.localNode().getId(), request.taskId());
         final TransportVerifyShardBeforeCloseAction.ShardRequest shardRequest =
-            new TransportVerifyShardBeforeCloseAction.ShardRequest(shardId, closingBlock, parentTaskId);
+            new TransportVerifyShardBeforeCloseAction.ShardRequest(shardId, closingBlock, true, parentTaskId);
         if (request.ackTimeout() != null) {
             shardRequest.timeout(request.ackTimeout());
         }
-        transportVerifyShardBeforeCloseAction.execute(shardRequest, listener);
+        transportVerifyShardBeforeCloseAction.execute(shardRequest, new ActionListener<>() {
+            @Override
+            public void onResponse(ReplicationResponse replicationResponse) {
+                final TransportVerifyShardBeforeCloseAction.ShardRequest shardRequest =
+                    new TransportVerifyShardBeforeCloseAction.ShardRequest(shardId, closingBlock, false, parentTaskId);
+                if (request.ackTimeout() != null) {
+                    shardRequest.timeout(request.ackTimeout());
+                }
+                transportVerifyShardBeforeCloseAction.execute(shardRequest, listener);
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                listener.onFailure(e);
+            }
+        });
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java
index 07d0f8200a662..e21b816aefd80 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java
@@ -793,9 +793,9 @@ public final CommitStats commitStats() {
     }
 
     /**
-     * @return the local checkpoint for this Engine
+     * @return the persisted local checkpoint for this Engine
     */
-    public abstract long getLocalCheckpoint();
+    public abstract long getPersistedLocalCheckpoint();
 
     /**
     * @return a {@link SeqNoStats} object, using local state and the supplied global checkpoint
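The `getPersistedLocalCheckpoint` rename reflects the processed/persisted split that the rest of this diff introduces on `LocalCheckpointTracker`. A sketch of the behavior, using only the tracker methods this change calls (the two-argument constructor following the usual maxSeqNo/localCheckpoint convention is an assumption):

["source","java"]
--------------------------------------------------
import org.elasticsearch.index.seqno.LocalCheckpointTracker;
import org.elasticsearch.index.seqno.SequenceNumbers;

LocalCheckpointTracker tracker = new LocalCheckpointTracker(
    SequenceNumbers.NO_OPS_PERFORMED, SequenceNumbers.NO_OPS_PERFORMED);
tracker.markSeqNoAsProcessed(0);              // op applied to Lucene / the version map
assert tracker.getProcessedCheckpoint() == 0; // drives refresh visibility and flush decisions
assert tracker.getPersistedCheckpoint() == SequenceNumbers.NO_OPS_PERFORMED; // translog not fsynced yet
tracker.markSeqNoAsPersisted(0);              // a translog sync reported seq_no 0 as durable
assert tracker.getPersistedCheckpoint() == 0; // what getPersistedLocalCheckpoint() now reports
--------------------------------------------------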
diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
index 6249dee2f7ca4..dd4b00f2a3f7f 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
@@ -116,6 +116,7 @@
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 import java.util.function.BiFunction;
+import java.util.function.LongConsumer;
 import java.util.function.LongSupplier;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
@@ -204,7 +205,14 @@ public InternalEngine(EngineConfig engineConfig) {
         throttle = new IndexThrottle();
         try {
             trimUnsafeCommits(engineConfig);
-            translog = openTranslog(engineConfig, translogDeletionPolicy, engineConfig.getGlobalCheckpointSupplier());
+            translog = openTranslog(engineConfig, translogDeletionPolicy, engineConfig.getGlobalCheckpointSupplier(),
+                seqNo -> {
+                    final LocalCheckpointTracker tracker = getLocalCheckpointTracker();
+                    assert tracker != null || getTranslog().isOpen() == false;
+                    if (tracker != null) {
+                        tracker.markSeqNoAsPersisted(seqNo);
+                    }
+                });
             assert translog.getGeneration() != null;
             this.translog = translog;
             this.softDeleteEnabled = engineConfig.getIndexSettings().isSoftDeleteEnabled();
@@ -241,10 +249,10 @@ public InternalEngine(EngineConfig engineConfig) {
             for (ReferenceManager.RefreshListener listener: engineConfig.getInternalRefreshListener()) {
                 this.internalSearcherManager.addListener(listener);
             }
-            this.lastRefreshedCheckpointListener = new LastRefreshedCheckpointListener(localCheckpointTracker.getCheckpoint());
+            this.lastRefreshedCheckpointListener = new LastRefreshedCheckpointListener(localCheckpointTracker.getProcessedCheckpoint());
             this.internalSearcherManager.addListener(lastRefreshedCheckpointListener);
             maxSeqNoOfUpdatesOrDeletes = new AtomicLong(SequenceNumbers.max(localCheckpointTracker.getMaxSeqNo(), translog.getMaxSeqNo()));
-            if (softDeleteEnabled && localCheckpointTracker.getCheckpoint() < localCheckpointTracker.getMaxSeqNo()) {
+            if (softDeleteEnabled && localCheckpointTracker.getPersistedCheckpoint() < localCheckpointTracker.getMaxSeqNo()) {
                 try (Searcher searcher = acquireSearcher("restore_version_map_and_checkpoint_tracker", SearcherScope.INTERNAL)) {
                     restoreVersionMapAndCheckpointTracker(Lucene.wrapAllDocsLive(searcher.getDirectoryReader()));
                 } catch (IOException e) {
@@ -366,7 +374,7 @@ protected int getRefCount(IndexSearcher reference) {
     public int restoreLocalHistoryFromTranslog(TranslogRecoveryRunner translogRecoveryRunner) throws IOException {
         try (ReleasableLock ignored = readLock.acquire()) {
             ensureOpen();
-            final long localCheckpoint = localCheckpointTracker.getCheckpoint();
+            final long localCheckpoint = localCheckpointTracker.getProcessedCheckpoint();
             try (Translog.Snapshot snapshot = getTranslog().newSnapshotFromMinSeqNo(localCheckpoint + 1)) {
                 return translogRecoveryRunner.run(this, snapshot);
            }
@@ -377,19 +385,23 @@
     public int fillSeqNoGaps(long primaryTerm) throws IOException {
         try (ReleasableLock ignored = writeLock.acquire()) {
             ensureOpen();
-            final long localCheckpoint = localCheckpointTracker.getCheckpoint();
+            final long localCheckpoint = localCheckpointTracker.getProcessedCheckpoint();
             final long maxSeqNo = localCheckpointTracker.getMaxSeqNo();
             int numNoOpsAdded = 0;
             for (
                     long seqNo = localCheckpoint + 1;
                     seqNo <= maxSeqNo;
-                    seqNo = localCheckpointTracker.getCheckpoint() + 1 /* the local checkpoint might have advanced so we leap-frog */) {
+                    seqNo = localCheckpointTracker.getProcessedCheckpoint() + 1 /* leap-frog the local checkpoint */) {
                 innerNoOp(new NoOp(seqNo, primaryTerm, Operation.Origin.PRIMARY, System.nanoTime(), "filling gaps"));
                 numNoOpsAdded++;
-                assert seqNo <= localCheckpointTracker.getCheckpoint()
-                    : "local checkpoint did not advance; was [" + seqNo + "], now [" + localCheckpointTracker.getCheckpoint() + "]";
+                assert seqNo <= localCheckpointTracker.getProcessedCheckpoint() :
+                    "local checkpoint did not advance; was [" + seqNo + "], now [" + localCheckpointTracker.getProcessedCheckpoint() + "]";
             }
+            syncTranslog(); // to persist noops associated with the advancement of the local checkpoint
+            assert localCheckpointTracker.getPersistedCheckpoint() == maxSeqNo
+                : "persisted local checkpoint did not advance to max seq no; is [" + localCheckpointTracker.getPersistedCheckpoint() +
+                "], max seq no [" + maxSeqNo + "]";
             return numNoOpsAdded;
         }
     }
@@ -467,13 +479,13 @@ private void recoverFromTranslogInternal(TranslogRecoveryRunner translogRecovery
     }
 
     private Translog openTranslog(EngineConfig engineConfig, TranslogDeletionPolicy translogDeletionPolicy,
-                                  LongSupplier globalCheckpointSupplier) throws IOException {
+                                  LongSupplier globalCheckpointSupplier, LongConsumer persistedSequenceNumberConsumer) throws IOException {
         final TranslogConfig translogConfig = engineConfig.getTranslogConfig();
         final String translogUUID = loadTranslogUUIDFromLastCommit();
         // We expect that this shard already exists, so it must already have an existing translog else something is badly wrong!
         return new Translog(translogConfig, translogUUID, translogDeletionPolicy, globalCheckpointSupplier,
-            engineConfig.getPrimaryTermSupplier());
+            engineConfig.getPrimaryTermSupplier(), persistedSequenceNumberConsumer);
     }
 
     // Package private for testing purposes only
@@ -707,7 +719,7 @@ private OpVsLuceneDocStatus compareOpToLuceneDocBasedOnSeqNo(final Operation op)
         } else if (op.seqNo() > docAndSeqNo.seqNo) {
             status = OpVsLuceneDocStatus.OP_NEWER;
         } else if (op.seqNo() == docAndSeqNo.seqNo) {
-            assert localCheckpointTracker.contains(op.seqNo()) || softDeleteEnabled == false :
+            assert localCheckpointTracker.hasProcessed(op.seqNo()) || softDeleteEnabled == false :
                 "local checkpoint tracker is not updated seq_no=" + op.seqNo() + " id=" + op.id();
             status = OpVsLuceneDocStatus.OP_STALE_OR_EQUAL;
         } else {
@@ -910,7 +922,12 @@ public IndexResult index(Index index) throws IOException {
                 versionMap.maybePutIndexUnderLock(index.uid().bytes(),
                     new IndexVersionValue(translogLocation, plan.versionForIndexing, index.seqNo(), index.primaryTerm()));
             }
-            localCheckpointTracker.markSeqNoAsCompleted(indexResult.getSeqNo());
+            localCheckpointTracker.markSeqNoAsProcessed(indexResult.getSeqNo());
+            if (indexResult.getTranslogLocation() == null) {
+                // the op is coming from the translog (and is hence persisted already) or it does not have a sequence number
+                assert index.origin().isFromTranslog() || indexResult.getSeqNo() == SequenceNumbers.UNASSIGNED_SEQ_NO;
+                localCheckpointTracker.markSeqNoAsPersisted(indexResult.getSeqNo());
+            }
             indexResult.setTook(System.nanoTime() - index.startTime());
             indexResult.freeze();
             return indexResult;
@@ -950,7 +967,7 @@ protected final IndexingStrategy planIndexingAsNonPrimary(Index index) throws IO
             // unlike the primary, replicas don't really care to about creation status of documents
             // this allows to ignore the case where a document was found in the live version maps in
             // a delete state and return false for the created flag in favor of code simplicity
-            if (index.seqNo() <= localCheckpointTracker.getCheckpoint()){
+            if (index.seqNo() <= localCheckpointTracker.getProcessedCheckpoint()){
                 // the operation seq# is lower then the current local checkpoint and thus was already put into lucene
                 // this can happen during recovery where older operations are sent from the translog that are already
                 // part of the lucene commit (either from a peer recovery or a local translog)
@@ -1263,7 +1280,12 @@ public DeleteResult delete(Delete delete) throws IOException {
                 final Translog.Location location = translog.add(new Translog.Delete(delete, deleteResult));
                 deleteResult.setTranslogLocation(location);
             }
-            localCheckpointTracker.markSeqNoAsCompleted(deleteResult.getSeqNo());
+            localCheckpointTracker.markSeqNoAsProcessed(deleteResult.getSeqNo());
+            if (deleteResult.getTranslogLocation() == null) {
+                // the op is coming from the translog (and is hence persisted already) or does not have a sequence number (version conflict)
+                assert delete.origin().isFromTranslog() || deleteResult.getSeqNo() == SequenceNumbers.UNASSIGNED_SEQ_NO;
+                localCheckpointTracker.markSeqNoAsPersisted(deleteResult.getSeqNo());
+            }
             deleteResult.setTook(System.nanoTime() - delete.startTime());
             deleteResult.freeze();
         } catch (RuntimeException | IOException e) {
@@ -1296,7 +1318,7 @@ protected final DeletionStrategy planDeletionAsNonPrimary(Delete delete) throws
             // this allows to ignore the case where a document was found in the live version maps in
             // a delete state and return true for the found flag in favor of code simplicity
             final DeletionStrategy plan;
-            if (delete.seqNo() <= localCheckpointTracker.getCheckpoint()) {
+            if (delete.seqNo() <= localCheckpointTracker.getProcessedCheckpoint()) {
                 // the operation seq# is lower then the current local checkpoint and thus was already put into lucene
                 // this can happen during recovery where older operations are sent from the translog that are already
                 // part of the lucene commit (either from a peer recovery or a local translog)
@@ -1470,10 +1492,10 @@ private NoOpResult innerNoOp(final NoOp noOp) throws IOException {
         try (Releasable ignored = noOpKeyedLock.acquire(seqNo)) {
             final NoOpResult noOpResult;
             final Optional<Exception> preFlightError = preFlightCheckForNoOp(noOp);
+            Exception failure = null;
             if (preFlightError.isPresent()) {
-                noOpResult = new NoOpResult(getPrimaryTerm(), noOp.seqNo(), preFlightError.get());
+                noOpResult = new NoOpResult(getPrimaryTerm(), SequenceNumbers.UNASSIGNED_SEQ_NO, preFlightError.get());
             } else {
-                Exception failure = null;
                 markSeqNoAsSeen(noOp.seqNo());
                 if (softDeleteEnabled) {
                     try {
@@ -1506,7 +1528,14 @@ private NoOpResult innerNoOp(final NoOp noOp) throws IOException {
                     noOpResult.setTranslogLocation(location);
                 }
             }
-            localCheckpointTracker.markSeqNoAsCompleted(seqNo);
+            localCheckpointTracker.markSeqNoAsProcessed(noOpResult.getSeqNo());
+            if (noOpResult.getTranslogLocation() == null) {
+                // the op is coming from the translog (and is hence persisted already) or it does not have a sequence number, or we failed
+                // to add a tombstone doc to Lucene with a non-fatal error, which would be very surprising
+                // TODO: always fail the engine in the last case, as this creates gaps in the history
+                assert noOp.origin().isFromTranslog() || noOpResult.getSeqNo() == SequenceNumbers.UNASSIGNED_SEQ_NO || failure != null;
+                localCheckpointTracker.markSeqNoAsPersisted(noOpResult.getSeqNo());
+            }
             noOpResult.setTook(System.nanoTime() - noOp.startTime());
             noOpResult.freeze();
             return noOpResult;
@@ -1536,7 +1565,7 @@ final boolean refresh(String source, SearcherScope scope, boolean block) throws
         // since it flushes the index as well (though, in terms of concurrency, we are allowed to do it)
         // both refresh types will result in an internal refresh but only the external will also
         // pass the new reader reference to the external reader manager.
-        final long localCheckpointBeforeRefresh = getLocalCheckpoint();
+        final long localCheckpointBeforeRefresh = localCheckpointTracker.getProcessedCheckpoint();
         boolean refreshed;
         try (ReleasableLock lock = readLock.acquire()) {
             ensureOpen();
@@ -1678,9 +1707,9 @@ public boolean shouldPeriodicallyFlush() {
         * This method is to maintain translog only, thus IndexWriter#hasUncommittedChanges condition is not considered.
         */
         final long translogGenerationOfNewCommit =
-            translog.getMinGenerationForSeqNo(localCheckpointTracker.getCheckpoint() + 1).translogFileGeneration;
+            translog.getMinGenerationForSeqNo(localCheckpointTracker.getProcessedCheckpoint() + 1).translogFileGeneration;
         return translogGenerationOfLastCommit < translogGenerationOfNewCommit
-            || localCheckpointTracker.getCheckpoint() == localCheckpointTracker.getMaxSeqNo();
+            || localCheckpointTracker.getProcessedCheckpoint() == localCheckpointTracker.getMaxSeqNo();
     }
 
     @Override
@@ -1867,7 +1896,7 @@ private void pruneDeletedTombstones() {
         */
        final long timeMSec = engineConfig.getThreadPool().relativeTimeInMillis();
        final long maxTimestampToPrune = timeMSec - engineConfig.getIndexSettings().getGcDeletesInMillis();
-        versionMap.pruneTombstones(maxTimestampToPrune, localCheckpointTracker.getCheckpoint());
+        versionMap.pruneTombstones(maxTimestampToPrune, localCheckpointTracker.getProcessedCheckpoint());
         lastDeleteVersionPruneTimeMSec = timeMSec;
     }
 
@@ -2356,7 +2385,7 @@ protected void doRun() throws Exception {
     protected void commitIndexWriter(final IndexWriter writer, final Translog translog, @Nullable final String syncId) throws IOException {
         ensureCanFlush();
         try {
-            final long localCheckpoint = localCheckpointTracker.getCheckpoint();
+            final long localCheckpoint = localCheckpointTracker.getProcessedCheckpoint();
             final Translog.TranslogGeneration translogGeneration = translog.getMinGenerationForSeqNo(localCheckpoint + 1);
             final String translogFileGeneration = Long.toString(translogGeneration.translogFileGeneration);
             final String translogUUID = translogGeneration.translogUUID;
@@ -2441,7 +2470,6 @@ public MergeStats getMergeStats() {
         return mergeScheduler.stats();
     }
 
-    // Used only for testing! Package private to prevent anyone else from using it
     LocalCheckpointTracker getLocalCheckpointTracker() {
         return localCheckpointTracker;
     }
@@ -2451,9 +2479,13 @@ public long getLastSyncedGlobalCheckpoint() {
         return getTranslog().getLastSyncedGlobalCheckpoint();
     }
 
+    public long getProcessedLocalCheckpoint() {
+        return localCheckpointTracker.getProcessedCheckpoint();
+    }
+
     @Override
-    public long getLocalCheckpoint() {
-        return localCheckpointTracker.getCheckpoint();
+    public long getPersistedLocalCheckpoint() {
+        return localCheckpointTracker.getPersistedCheckpoint();
     }
 
     /**
@@ -2476,7 +2508,7 @@ protected final boolean hasBeenProcessedBefore(Operation op) {
                 assert versionMap.assertKeyedLockHeldByCurrentThread(op.uid().bytes());
             }
         }
-        return localCheckpointTracker.contains(op.seqNo());
+        return localCheckpointTracker.hasProcessed(op.seqNo());
     }
 
     @Override
@@ -2566,7 +2598,7 @@ public Translog.Snapshot newChangesSnapshot(String source, MapperService mapperS
 
     @Override
     public boolean hasCompleteOperationHistory(String source, MapperService mapperService, long startingSeqNo) throws IOException {
-        final long currentLocalCheckpoint = getLocalCheckpointTracker().getCheckpoint();
+        final long currentLocalCheckpoint = localCheckpointTracker.getProcessedCheckpoint();
         // avoid scanning translog if not necessary
         if (startingSeqNo > currentLocalCheckpoint) {
             return true;
@@ -2576,11 +2608,11 @@ public boolean hasCompleteOperationHistory(String source, MapperService mapperSe
             Translog.Operation operation;
             while ((operation = snapshot.next()) != null) {
                 if (operation.seqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) {
-                    tracker.markSeqNoAsCompleted(operation.seqNo());
+                    tracker.markSeqNoAsProcessed(operation.seqNo());
                 }
             }
         }
-        return tracker.getCheckpoint() >= currentLocalCheckpoint;
+        return tracker.getProcessedCheckpoint() >= currentLocalCheckpoint;
     }
 
     /**
@@ -2696,7 +2728,7 @@ private final class LastRefreshedCheckpointListener implements ReferenceManager.
         @Override
         public void beforeRefresh() {
             // all changes until this point should be visible after refresh
-            pendingCheckpoint = localCheckpointTracker.getCheckpoint();
+            pendingCheckpoint = localCheckpointTracker.getProcessedCheckpoint();
         }
 
         @Override
@@ -2757,7 +2789,7 @@ private boolean assertMaxSeqNoOfUpdatesIsAdvanced(Term id, long seqNo, boolean a
         // Operations can be processed on a replica in a different order than on the primary. If the order on the primary is index-1,
         // delete-2, index-3, and the order on a replica is index-1, index-3, delete-2, then the msu of index-3 on the replica is 2
         // even though it is an update (overwrites index-1). We should relax this assertion if there is a pending gap in the seq_no.
-        if (relaxIfGapInSeqNo && getLocalCheckpoint() < maxSeqNoOfUpdates) {
+        if (relaxIfGapInSeqNo && localCheckpointTracker.getProcessedCheckpoint() < maxSeqNoOfUpdates) {
            return true;
         }
         assert seqNo <= maxSeqNoOfUpdates : "id=" + id + " seq_no=" + seqNo + " msu=" + maxSeqNoOfUpdates;
@@ -2781,7 +2813,7 @@ private static void trimUnsafeCommits(EngineConfig engineConfig) throws IOExcept
     private void restoreVersionMapAndCheckpointTracker(DirectoryReader directoryReader) throws IOException {
         final IndexSearcher searcher = new IndexSearcher(directoryReader);
         searcher.setQueryCache(null);
-        final Query query = LongPoint.newRangeQuery(SeqNoFieldMapper.NAME, getLocalCheckpoint() + 1, Long.MAX_VALUE);
+        final Query query = LongPoint.newRangeQuery(SeqNoFieldMapper.NAME, getPersistedLocalCheckpoint() + 1, Long.MAX_VALUE);
         final Weight weight = searcher.createWeight(query, ScoreMode.COMPLETE_NO_SCORES, 1.0f);
         for (LeafReaderContext leaf : directoryReader.leaves()) {
             final Scorer scorer = weight.scorer(leaf);
@@ -2798,7 +2830,8 @@ private void restoreVersionMapAndCheckpointTracker(DirectoryReader directoryRead
                 continue; // skip children docs which do not have primary term
             }
             final long seqNo = dv.docSeqNo(docId);
-            localCheckpointTracker.markSeqNoAsCompleted(seqNo);
+            localCheckpointTracker.markSeqNoAsProcessed(seqNo);
+            localCheckpointTracker.markSeqNoAsPersisted(seqNo);
             idFieldVisitor.reset();
             leaf.reader().document(docId, idFieldVisitor);
             if (idFieldVisitor.getId() == null) {
diff --git a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java
index 802742a61ea85..79c8331061636 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java
@@ -329,7 +329,7 @@ public Translog.Location getTranslogLastWriteLocation() {
     }
 
     @Override
-    public long getLocalCheckpoint() {
+    public long getPersistedLocalCheckpoint() {
         return seqNoStats.getLocalCheckpoint();
     }
 
import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LongValues; @@ -34,12 +35,12 @@ final class GlobalOrdinalMapping extends SortedSetDocValues { private final SortedSetDocValues values; private final OrdinalMap ordinalMap; private final LongValues mapping; - private final SortedSetDocValues[] bytesValues; + private final TermsEnum[] lookups; - GlobalOrdinalMapping(OrdinalMap ordinalMap, SortedSetDocValues[] bytesValues, int segmentIndex) { + GlobalOrdinalMapping(OrdinalMap ordinalMap, SortedSetDocValues values, TermsEnum[] lookups, int segmentIndex) { super(); - this.values = bytesValues[segmentIndex]; - this.bytesValues = bytesValues; + this.values = values; + this.lookups = lookups; this.ordinalMap = ordinalMap; this.mapping = ordinalMap.getGlobalOrds(segmentIndex); } @@ -72,7 +73,8 @@ public long nextOrd() throws IOException { public BytesRef lookupOrd(long globalOrd) throws IOException { final long segmentOrd = ordinalMap.getFirstSegmentOrd(globalOrd); int readerIndex = ordinalMap.getFirstSegmentNumber(globalOrd); - return bytesValues[readerIndex].lookupOrd(segmentOrd); + lookups[readerIndex].seekExact(segmentOrd); + return lookups[readerIndex].term(); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsIndexFieldData.java index a869e5a40d4ed..b3c2b4c92597f 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsIndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsIndexFieldData.java @@ -22,53 +22,64 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.OrdinalMap; import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.SortField; import org.apache.lucene.util.Accountable; import org.elasticsearch.common.Nullable; import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.fielddata.AtomicOrdinalsFieldData; -import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.fielddata.plain.AbstractAtomicOrdinalsFieldData; import org.elasticsearch.search.MultiValueMode; +import java.io.IOException; +import java.io.UncheckedIOException; import java.util.Collection; import java.util.Collections; import java.util.function.Function; /** - * {@link IndexFieldData} base class for concrete global ordinals implementations. + * Concrete implementation of {@link IndexOrdinalsFieldData} for global ordinals. + * A single instance of this class should be used to cache global ordinals per {@link DirectoryReader}. + * However {@link #loadGlobal(DirectoryReader)} always creates a new instance of {@link Consumer} from the cached + * value in order to reuse the per-segment {@link TermsEnum} instances that are needed to retrieve terms from global ordinals. + * Each instance of {@link Consumer} uses a new set of {@link TermsEnum} that can be reused during the collection; + * this avoids recreating every segment's {@link TermsEnum} each time we want to access the values of a single + * segment.
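The rewritten lookupOrd above no longer routes term lookup through another SortedSetDocValues; it maps the global ordinal back to the owning segment via the OrdinalMap and seeks a per-segment TermsEnum. A standalone sketch of that resolution step against plain Lucene APIs (class and method names here are illustrative):

```java
import java.io.IOException;
import org.apache.lucene.index.OrdinalMap;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;

final class GlobalOrdLookup {

    /**
     * Resolve a global ordinal to its term, mirroring GlobalOrdinalMapping#lookupOrd:
     * find the segment that first defines this ordinal, translate the global ordinal
     * into that segment's ordinal space, then position the segment's TermsEnum on it.
     */
    static BytesRef lookup(OrdinalMap ordinalMap, TermsEnum[] segmentLookups, long globalOrd) throws IOException {
        int segment = ordinalMap.getFirstSegmentNumber(globalOrd);  // which segment owns this global ord
        long segmentOrd = ordinalMap.getFirstSegmentOrd(globalOrd); // the ordinal within that segment
        TermsEnum lookup = segmentLookups[segment];
        lookup.seekExact(segmentOrd); // TermsEnum is stateful, hence one array of enums per consumer
        return lookup.term();
    }
}
```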
*/ -public class GlobalOrdinalsIndexFieldData extends AbstractIndexComponent implements IndexOrdinalsFieldData, Accountable { +public final class GlobalOrdinalsIndexFieldData extends AbstractIndexComponent implements IndexOrdinalsFieldData, Accountable { private final String fieldName; private final long memorySizeInBytes; private final OrdinalMap ordinalMap; - private final Atomic[] atomicReaders; + private final AtomicOrdinalsFieldData[] segmentAfd; private final Function<SortedSetDocValues, ScriptDocValues<?>> scriptFunction; - - protected GlobalOrdinalsIndexFieldData(IndexSettings indexSettings, String fieldName, AtomicOrdinalsFieldData[] segmentAfd, - OrdinalMap ordinalMap, long memorySizeInBytes, Function<SortedSetDocValues, ScriptDocValues<?>> scriptFunction) { + protected GlobalOrdinalsIndexFieldData(IndexSettings indexSettings, + String fieldName, + AtomicOrdinalsFieldData[] segmentAfd, + OrdinalMap ordinalMap, + long memorySizeInBytes, + Function<SortedSetDocValues, ScriptDocValues<?>> scriptFunction) { super(indexSettings); this.fieldName = fieldName; this.memorySizeInBytes = memorySizeInBytes; this.ordinalMap = ordinalMap; - this.atomicReaders = new Atomic[segmentAfd.length]; - for (int i = 0; i < segmentAfd.length; i++) { - atomicReaders[i] = new Atomic(segmentAfd[i], ordinalMap, i); - } + this.segmentAfd = segmentAfd; this.scriptFunction = scriptFunction; } + public IndexOrdinalsFieldData newConsumer(DirectoryReader source) { + return new Consumer(source, indexSettings); + } + @Override public AtomicOrdinalsFieldData loadDirect(LeafReaderContext context) throws Exception { - return load(context); + throw new IllegalStateException("loadDirect(LeafReaderContext) should not be called in this context"); } @Override @@ -92,9 +103,7 @@ public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMod } @Override - public void clear() { - // no need to clear, because this is cached and cleared in AbstractBytesIndexFieldData - } + public void clear() {} @Override public long ramBytesUsed() { @@ -109,7 +118,7 @@ public Collection<Accountable> getChildResources() { @Override public AtomicOrdinalsFieldData load(LeafReaderContext context) { - return atomicReaders[context.ord]; + throw new IllegalStateException("load(LeafReaderContext) should not be called in this context"); } @Override @@ -117,46 +126,108 @@ public OrdinalMap getOrdinalMap() { return ordinalMap; } - private final class Atomic extends AbstractAtomicOrdinalsFieldData { + /** + * A non-thread-safe {@link IndexOrdinalsFieldData} for global ordinals that creates the {@link TermsEnum} of each + * segment once and uses them to provide a single lookup per segment.
+ */ + public class Consumer extends AbstractIndexComponent implements IndexOrdinalsFieldData, Accountable { + private final DirectoryReader source; + private TermsEnum[] lookups; + + Consumer(DirectoryReader source, IndexSettings settings) { + super(settings); + this.source = source; + } - private final AtomicOrdinalsFieldData afd; - private final OrdinalMap ordinalMap; - private final int segmentIndex; + /** + * Lazy creation of the {@link TermsEnum} for each segment present in this reader + */ + private TermsEnum[] getOrLoadTermsEnums() { + if (lookups == null) { + lookups = new TermsEnum[segmentAfd.length]; + for (int i = 0; i < lookups.length; i++) { + try { + lookups[i] = segmentAfd[i].getOrdinalsValues().termsEnum(); + } catch (IOException e) { + throw new UncheckedIOException("Failed to load terms enum", e); + } + } + } + return lookups; + } - private Atomic(AtomicOrdinalsFieldData afd, OrdinalMap ordinalMap, int segmentIndex) { - super(scriptFunction); - this.afd = afd; - this.ordinalMap = ordinalMap; - this.segmentIndex = segmentIndex; + @Override + public AtomicOrdinalsFieldData loadDirect(LeafReaderContext context) throws Exception { + return load(context); } @Override - public SortedSetDocValues getOrdinalsValues() { - final SortedSetDocValues values = afd.getOrdinalsValues(); - if (values.getValueCount() == ordinalMap.getValueCount()) { - // segment ordinals match global ordinals - return values; - } - final SortedSetDocValues[] bytesValues = new SortedSetDocValues[atomicReaders.length]; - for (int i = 0; i < bytesValues.length; i++) { - bytesValues[i] = atomicReaders[i].afd.getOrdinalsValues(); - } - return new GlobalOrdinalMapping(ordinalMap, bytesValues, segmentIndex); + public IndexOrdinalsFieldData loadGlobal(DirectoryReader indexReader) { + return this; + } + + @Override + public IndexOrdinalsFieldData localGlobalDirect(DirectoryReader indexReader) throws Exception { + return this; + } + + @Override + public String getFieldName() { + return fieldName; + } + + @Override + public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested, boolean reverse) { + throw new UnsupportedOperationException("no global ordinals sorting yet"); } + @Override + public void clear() {} + @Override public long ramBytesUsed() { - return afd.ramBytesUsed(); + return memorySizeInBytes; } @Override public Collection getChildResources() { - return afd.getChildResources(); + return Collections.emptyList(); } @Override - public void close() { + public AtomicOrdinalsFieldData load(LeafReaderContext context) { + assert source.getReaderCacheHelper().getKey() == context.parent.reader().getReaderCacheHelper().getKey(); + return new AbstractAtomicOrdinalsFieldData(scriptFunction) { + @Override + public SortedSetDocValues getOrdinalsValues() { + final SortedSetDocValues values = segmentAfd[context.ord].getOrdinalsValues(); + if (values.getValueCount() == ordinalMap.getValueCount()) { + // segment ordinals match global ordinals + return values; + } + final TermsEnum[] atomicLookups = getOrLoadTermsEnums(); + return new GlobalOrdinalMapping(ordinalMap, values, atomicLookups, context.ord); + } + + @Override + public long ramBytesUsed() { + return segmentAfd[context.ord].ramBytesUsed(); + } + + + @Override + public Collection getChildResources() { + return segmentAfd[context.ord].getChildResources(); + } + + @Override + public void close() {} + }; } + @Override + public OrdinalMap getOrdinalMap() { + return ordinalMap; + } } } diff --git 
a/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexOrdinalsFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexOrdinalsFieldData.java index 0dc0de838a3e5..a7d63828138eb 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexOrdinalsFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexOrdinalsFieldData.java @@ -32,6 +32,7 @@ import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData; import org.elasticsearch.index.fielddata.ordinals.GlobalOrdinalsBuilder; +import org.elasticsearch.index.fielddata.ordinals.GlobalOrdinalsIndexFieldData; import org.elasticsearch.indices.breaker.CircuitBreakerService; import java.io.IOException; @@ -60,6 +61,17 @@ public OrdinalMap getOrdinalMap() { @Override public IndexOrdinalsFieldData loadGlobal(DirectoryReader indexReader) { + IndexOrdinalsFieldData fieldData = loadGlobalInternal(indexReader); + if (fieldData instanceof GlobalOrdinalsIndexFieldData) { + // we create a new instance of the cached value for each consumer in order + // to avoid creating new TermsEnums for each segment in the cached instance + return ((GlobalOrdinalsIndexFieldData) fieldData).newConsumer(indexReader); + } else { + return fieldData; + } + } + + private IndexOrdinalsFieldData loadGlobalInternal(DirectoryReader indexReader) { if (indexReader.leaves().size() <= 1) { // ordinals are already global return this; diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetDVOrdinalsIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetDVOrdinalsIndexFieldData.java index b71dcc7593417..836c332a340bd 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetDVOrdinalsIndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetDVOrdinalsIndexFieldData.java @@ -37,6 +37,7 @@ import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource; +import org.elasticsearch.index.fielddata.ordinals.GlobalOrdinalsIndexFieldData; import org.elasticsearch.index.fielddata.ordinals.GlobalOrdinalsBuilder; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.search.MultiValueMode; @@ -92,6 +93,17 @@ public AtomicOrdinalsFieldData loadDirect(LeafReaderContext context) throws Exce @Override public IndexOrdinalsFieldData loadGlobal(DirectoryReader indexReader) { + IndexOrdinalsFieldData fieldData = loadGlobalInternal(indexReader); + if (fieldData instanceof GlobalOrdinalsIndexFieldData) { + // we create a new instance of the cached value for each consumer in order + // to avoid creating new TermsEnums for each segment in the cached instance + return ((GlobalOrdinalsIndexFieldData) fieldData).newConsumer(indexReader); + } else { + return fieldData; + } + } + + private IndexOrdinalsFieldData loadGlobalInternal(DirectoryReader indexReader) { if (indexReader.leaves().size() <= 1) { // ordinals are already global return this; diff --git a/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java index d67cbc833d666..70e34623a413a 100644 --- 
a/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java @@ -118,7 +118,7 @@ protected ReplicaResult shardOperationOnReplica(final Request request, final Ind private void maybeSyncTranslog(final IndexShard indexShard) throws IOException { if (indexShard.getTranslogDurability() == Translog.Durability.REQUEST && - indexShard.getLastSyncedGlobalCheckpoint() < indexShard.getGlobalCheckpoint()) { + indexShard.getLastSyncedGlobalCheckpoint() < indexShard.getLastKnownGlobalCheckpoint()) { indexShard.sync(); } } diff --git a/server/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java index a19d9ac4abb94..185d3b2ad258c 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java @@ -22,6 +22,8 @@ import com.carrotsearch.hppc.LongObjectHashMap; import org.elasticsearch.common.SuppressForbidden; +import java.util.concurrent.atomic.AtomicLong; + /** * This class generates sequences numbers and keeps track of the so-called "local checkpoint" which is the highest number for which all * previous sequence numbers have been processed (inclusive). @@ -35,20 +37,31 @@ public class LocalCheckpointTracker { static final short BIT_SET_SIZE = 1024; /** - * A collection of bit sets representing pending sequence numbers. Each sequence number is mapped to a bit set by dividing by the + * A collection of bit sets representing processed sequence numbers. Each sequence number is mapped to a bit set by dividing by the * bit set size. */ final LongObjectHashMap processedSeqNo = new LongObjectHashMap<>(); /** - * The current local checkpoint, i.e., all sequence numbers no more than this number have been completed. + * A collection of bit sets representing durably persisted sequence numbers. Each sequence number is mapped to a bit set by dividing by + * the bit set size. + */ + final LongObjectHashMap persistedSeqNo = new LongObjectHashMap<>(); + + /** + * The current local checkpoint, i.e., all sequence numbers no more than this number have been processed. + */ + final AtomicLong processedCheckpoint = new AtomicLong(); + + /** + * The current persisted local checkpoint, i.e., all sequence numbers no more than this number have been durably persisted. */ - volatile long checkpoint; + final AtomicLong persistedCheckpoint = new AtomicLong(); /** * The next available sequence number. */ - private volatile long nextSeqNo; + final AtomicLong nextSeqNo = new AtomicLong(); /** * Initialize the local checkpoint service. The {@code maxSeqNo} should be set to the last sequence number assigned, or @@ -68,8 +81,9 @@ public LocalCheckpointTracker(final long maxSeqNo, final long localCheckpoint) { throw new IllegalArgumentException( "max seq. no. must be non-negative or [" + SequenceNumbers.NO_OPS_PERFORMED + "] but was [" + maxSeqNo + "]"); } - nextSeqNo = maxSeqNo == SequenceNumbers.NO_OPS_PERFORMED ? 
0 : maxSeqNo + 1; - checkpoint = localCheckpoint; + nextSeqNo.set(maxSeqNo + 1); + processedCheckpoint.set(localCheckpoint); + persistedCheckpoint.set(localCheckpoint); } /** @@ -77,48 +91,67 @@ public LocalCheckpointTracker(final long maxSeqNo, final long localCheckpoint) { * * @return the next assigned sequence number */ - public synchronized long generateSeqNo() { - return nextSeqNo++; + public long generateSeqNo() { + return nextSeqNo.getAndIncrement(); } /** * Marks the provided sequence number as seen and updates the max_seq_no if needed. */ - public synchronized void advanceMaxSeqNo(long seqNo) { - if (seqNo >= nextSeqNo) { - nextSeqNo = seqNo + 1; - } + public void advanceMaxSeqNo(final long seqNo) { + nextSeqNo.accumulateAndGet(seqNo + 1, Math::max); } /** - * Marks the processing of the provided sequence number as completed as updates the checkpoint if possible. + * Marks the provided sequence number as processed and updates the processed checkpoint if possible. * - * @param seqNo the sequence number to mark as completed + * @param seqNo the sequence number to mark as processed */ - public synchronized void markSeqNoAsCompleted(final long seqNo) { + public synchronized void markSeqNoAsProcessed(final long seqNo) { + markSeqNo(seqNo, processedCheckpoint, processedSeqNo); + } + + /** + * Marks the provided sequence number as persisted and updates the checkpoint if possible. + * + * @param seqNo the sequence number to mark as persisted + */ + public synchronized void markSeqNoAsPersisted(final long seqNo) { + markSeqNo(seqNo, persistedCheckpoint, persistedSeqNo); + } + + private void markSeqNo(final long seqNo, final AtomicLong checkPoint, final LongObjectHashMap bitSetMap) { + assert Thread.holdsLock(this); // make sure we track highest seen sequence number - if (seqNo >= nextSeqNo) { - nextSeqNo = seqNo + 1; - } - if (seqNo <= checkpoint) { + advanceMaxSeqNo(seqNo); + if (seqNo <= checkPoint.get()) { // this is possible during recovery where we might replay an operation that was also replicated return; } - final CountedBitSet bitSet = getBitSetForSeqNo(seqNo); + final CountedBitSet bitSet = getBitSetForSeqNo(bitSetMap, seqNo); final int offset = seqNoToBitSetOffset(seqNo); bitSet.set(offset); - if (seqNo == checkpoint + 1) { - updateCheckpoint(); + if (seqNo == checkPoint.get() + 1) { + updateCheckpoint(checkPoint, bitSetMap); } } /** - * The current checkpoint which can be advanced by {@link #markSeqNoAsCompleted(long)}. + * The current checkpoint which can be advanced by {@link #markSeqNoAsProcessed(long)}. * * @return the current checkpoint */ - public long getCheckpoint() { - return checkpoint; + public long getProcessedCheckpoint() { + return processedCheckpoint.get(); + } + + /** + * The current persisted checkpoint which can be advanced by {@link #markSeqNoAsPersisted(long)}. 
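markSeqNo and updateCheckpoint together implement the usual rule that a checkpoint is the highest seq_no below which there are no gaps, now maintained twice: once for processed and once for persisted operations. A toy single-stream model of that rule, with a plain Set standing in for the CountedBitSet pages:

```java
import java.util.HashSet;
import java.util.Set;

final class ToyCheckpoint {
    private long checkpoint = -1;                       // nothing completed yet
    private final Set<Long> pending = new HashSet<>();  // completed seq_nos above the checkpoint

    synchronized void mark(long seqNo) {
        if (seqNo <= checkpoint) {
            return; // already covered, e.g. an op replayed during recovery
        }
        pending.add(seqNo);
        // advance over every seq_no that is now contiguously complete
        while (pending.remove(checkpoint + 1)) {
            checkpoint++;
        }
    }

    synchronized long checkpoint() {
        return checkpoint;
    }

    public static void main(String[] args) {
        ToyCheckpoint t = new ToyCheckpoint();
        t.mark(0); // checkpoint -> 0
        t.mark(2); // gap at 1, checkpoint stays 0
        t.mark(1); // gap filled, checkpoint -> 2
        System.out.println(t.checkpoint()); // -> 2
    }
}
```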
+ * + * @return the current persisted checkpoint + */ + public long getPersistedCheckpoint() { + return persistedCheckpoint.get(); } /** @@ -127,17 +160,17 @@ public long getCheckpoint() { * @return the maximum sequence number */ public long getMaxSeqNo() { - return nextSeqNo - 1; + return nextSeqNo.get() - 1; } /** * constructs a {@link SeqNoStats} object, using local state and the supplied global checkpoint * - * This is needed to make sure the local checkpoint and max seq no are consistent + * This is needed to make sure the persisted local checkpoint and max seq no are consistent */ public synchronized SeqNoStats getStats(final long globalCheckpoint) { - return new SeqNoStats(getMaxSeqNo(), getCheckpoint(), globalCheckpoint); + return new SeqNoStats(getMaxSeqNo(), getPersistedCheckpoint(), globalCheckpoint); } /** @@ -147,70 +180,74 @@ public synchronized SeqNoStats getStats(final long globalCheckpoint) { * @throws InterruptedException if the thread was interrupted while blocking on the condition */ @SuppressForbidden(reason = "Object#wait") - public synchronized void waitForOpsToComplete(final long seqNo) throws InterruptedException { - while (checkpoint < seqNo) { + public synchronized void waitForProcessedOpsToComplete(final long seqNo) throws InterruptedException { + while (processedCheckpoint.get() < seqNo) { // notified by updateCheckpoint this.wait(); } } /** - * Checks if the given sequence number was marked as completed in this tracker. + * Checks if the given sequence number was marked as processed in this tracker. */ - public boolean contains(final long seqNo) { + public boolean hasProcessed(final long seqNo) { assert seqNo >= 0 : "invalid seq_no=" + seqNo; - if (seqNo >= nextSeqNo) { + if (seqNo >= nextSeqNo.get()) { return false; } - if (seqNo <= checkpoint) { + if (seqNo <= processedCheckpoint.get()) { return true; } final long bitSetKey = getBitSetKey(seqNo); final int bitSetOffset = seqNoToBitSetOffset(seqNo); synchronized (this) { + // check again under lock + if (seqNo <= processedCheckpoint.get()) { + return true; + } final CountedBitSet bitSet = processedSeqNo.get(bitSetKey); return bitSet != null && bitSet.get(bitSetOffset); } } /** - * Moves the checkpoint to the last consecutively processed sequence number. This method assumes that the sequence number following the - * current checkpoint is processed. + * Moves the checkpoint to the last consecutively processed sequence number. This method assumes that the sequence number + * following the current checkpoint is processed. 
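waitForProcessedOpsToComplete pairs with the notifyAll in updateCheckpoint's finally block: waiters re-check the checkpoint on every wake-up, and the AtomicLong keeps the unlocked fast path in hasProcessed safe. The wait/notify skeleton in isolation (field names mirror the tracker; this is a reduction, not the real class):

```java
import java.util.concurrent.atomic.AtomicLong;

final class WaitForSeqNo {
    private final AtomicLong processedCheckpoint = new AtomicLong(-1);

    /** advance the checkpoint and wake any waiters, as updateCheckpoint does in its finally block */
    synchronized void advanceTo(long seqNo) {
        processedCheckpoint.set(seqNo);
        notifyAll();
    }

    /** block until everything up to and including seqNo is processed, as waitForProcessedOpsToComplete does */
    synchronized void waitFor(long seqNo) throws InterruptedException {
        while (processedCheckpoint.get() < seqNo) {
            wait(); // monitor released while waiting; condition re-checked on every wake-up
        }
    }
}
```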
*/ @SuppressForbidden(reason = "Object#notifyAll") - private void updateCheckpoint() { + private void updateCheckpoint(AtomicLong checkPoint, LongObjectHashMap bitSetMap) { assert Thread.holdsLock(this); - assert getBitSetForSeqNo(checkpoint + 1).get(seqNoToBitSetOffset(checkpoint + 1)) : + assert getBitSetForSeqNo(bitSetMap, checkPoint.get() + 1).get(seqNoToBitSetOffset(checkPoint.get() + 1)) : "updateCheckpoint is called but the bit following the checkpoint is not set"; try { // keep it simple for now, get the checkpoint one by one; in the future we can optimize and read words - long bitSetKey = getBitSetKey(checkpoint); - CountedBitSet current = processedSeqNo.get(bitSetKey); + long bitSetKey = getBitSetKey(checkPoint.get()); + CountedBitSet current = bitSetMap.get(bitSetKey); if (current == null) { // the bit set corresponding to the checkpoint has already been removed, set ourselves up for the next bit set - assert checkpoint % BIT_SET_SIZE == BIT_SET_SIZE - 1; - current = processedSeqNo.get(++bitSetKey); + assert checkPoint.get() % BIT_SET_SIZE == BIT_SET_SIZE - 1; + current = bitSetMap.get(++bitSetKey); } do { - checkpoint++; + checkPoint.incrementAndGet(); /* * The checkpoint always falls in the current bit set or we have already cleaned it; if it falls on the last bit of the * current bit set, we can clean it. */ - if (checkpoint == lastSeqNoInBitSet(bitSetKey)) { + if (checkPoint.get() == lastSeqNoInBitSet(bitSetKey)) { assert current != null; - final CountedBitSet removed = processedSeqNo.remove(bitSetKey); + final CountedBitSet removed = bitSetMap.remove(bitSetKey); assert removed == current; - current = processedSeqNo.get(++bitSetKey); + current = bitSetMap.get(++bitSetKey); } - } while (current != null && current.get(seqNoToBitSetOffset(checkpoint + 1))); + } while (current != null && current.get(seqNoToBitSetOffset(checkPoint.get() + 1))); } finally { - // notifies waiters in waitForOpsToComplete + // notifies waiters in waitForProcessedOpsToComplete this.notifyAll(); } } - private long lastSeqNoInBitSet(final long bitSetKey) { + private static long lastSeqNoInBitSet(final long bitSetKey) { return (1 + bitSetKey) * BIT_SET_SIZE - 1; } @@ -220,32 +257,32 @@ private long lastSeqNoInBitSet(final long bitSetKey) { * @param seqNo the sequence number to obtain the bit set for * @return the bit set corresponding to the provided sequence number */ - private long getBitSetKey(final long seqNo) { + private static long getBitSetKey(final long seqNo) { return seqNo / BIT_SET_SIZE; } - private CountedBitSet getBitSetForSeqNo(final long seqNo) { + private CountedBitSet getBitSetForSeqNo(final LongObjectHashMap bitSetMap, final long seqNo) { assert Thread.holdsLock(this); final long bitSetKey = getBitSetKey(seqNo); - final int index = processedSeqNo.indexOf(bitSetKey); + final int index = bitSetMap.indexOf(bitSetKey); final CountedBitSet bitSet; - if (processedSeqNo.indexExists(index)) { - bitSet = processedSeqNo.indexGet(index); + if (bitSetMap.indexExists(index)) { + bitSet = bitSetMap.indexGet(index); } else { bitSet = new CountedBitSet(BIT_SET_SIZE); - processedSeqNo.indexInsert(index, bitSetKey, bitSet); + bitSetMap.indexInsert(index, bitSetKey, bitSet); } return bitSet; } /** * Obtain the position in the bit set corresponding to the provided sequence number. The bit set corresponding to the sequence number - * can be obtained via {@link #getBitSetForSeqNo(long)}. + * can be obtained via {@link #getBitSetForSeqNo(LongObjectHashMap, long)}. 
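The bookkeeping above is sharded into fixed 1024-bit windows: getBitSetKey selects the window, seqNoToBitSetOffset the bit inside it, and updateCheckpoint drops a window as soon as the checkpoint crosses its last bit. The arithmetic on its own (BIT_SET_SIZE as declared in the class):

```java
final class BitSetMath {
    static final int BIT_SET_SIZE = 1024;

    static long key(long seqNo)    { return seqNo / BIT_SET_SIZE; }                   // which window
    static int  offset(long seqNo) { return Math.toIntExact(seqNo % BIT_SET_SIZE); }  // which bit
    static long lastSeqNoInWindow(long key) { return (1 + key) * BIT_SET_SIZE - 1; }

    public static void main(String[] args) {
        System.out.println(key(1023) + " " + offset(1023)); // 0 1023 -> last bit of window 0
        System.out.println(key(1024) + " " + offset(1024)); // 1 0    -> first bit of window 1
        System.out.println(lastSeqNoInWindow(0));           // 1023: once the checkpoint passes this,
                                                            // window 0 can be removed from the map
    }
}
```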
* * @param seqNo the sequence number to obtain the position for * @return the position in the bit set corresponding to the provided sequence number */ - private int seqNoToBitSetOffset(final long seqNo) { + private static int seqNoToBitSetOffset(final long seqNo) { return Math.toIntExact(seqNo % BIT_SET_SIZE); } diff --git a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java index 1a67eb55e0576..eb1180f2294bf 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java @@ -146,9 +146,15 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L final Map checkpoints; /** - * A callback invoked when the global checkpoint is updated. For primary mode this occurs if the computed global checkpoint advances on - * the basis of state changes tracked here. For non-primary mode this occurs if the local knowledge of the global checkpoint advances - * due to an update from the primary. + * The current in-memory global checkpoint. In primary mode, this is a cached version of the checkpoint computed from the local + * checkpoints. In replica mode, this is the in-memory global checkpoint that's communicated by the primary. + */ + volatile long globalCheckpoint; + + /** + * A callback invoked when the in-memory global checkpoint is updated. For primary mode this occurs if the computed global checkpoint + * advances on the basis of state changes tracked here. For non-primary mode this occurs if the local knowledge of the global checkpoint + * advances due to an update from the primary. */ private final LongConsumer onGlobalCheckpointUpdated; @@ -393,13 +399,13 @@ public boolean assertRetentionLeasesPersisted(final Path path) throws IOExceptio public static class CheckpointState implements Writeable { /** - * the last local checkpoint information that we have for this shard + * the last local checkpoint information that we have for this shard. All operations up to this point are properly fsynced to disk. */ long localCheckpoint; /** - * the last global checkpoint information that we have for this shard. This information is computed for the primary if - * the tracker is in primary mode and received from the primary if in replica mode. + * the last global checkpoint information that we have for this shard. This is the global checkpoint that's fsynced to disk on the + * respective shard, and all operations up to this point are properly fsynced to disk as well. */ long globalCheckpoint; /** @@ -483,9 +489,9 @@ public int hashCode() { } /** - * Get the local knowledge of the global checkpoints for all in-sync allocation IDs. + * Get the local knowledge of the persisted global checkpoints for all in-sync allocation IDs. * - * @return a map from allocation ID to the local knowledge of the global checkpoint for that allocation ID + * @return a map from allocation ID to the local knowledge of the persisted global checkpoint for that allocation ID */ public synchronized ObjectLongMap getInSyncGlobalCheckpoints() { assert primaryMode; @@ -538,20 +544,11 @@ public boolean isRelocated() { * as a logical operator, many of the invariants are written under the form (!A || B), they should be read as (A implies B) however. 
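Per the updated CheckpointState docs above, the primary's global checkpoint is bounded by the minimum persisted local checkpoint across the in-sync copies. A simplified stand-alone version of that computation; Copy is a cut-down stand-in for CheckpointState, and the pendingInSync handling of the real computeGlobalCheckpoint is folded into the unassigned check:

```java
import java.util.Collection;
import java.util.List;

final class GlobalCheckpointMath {
    static final long UNASSIGNED_SEQ_NO = -2; // matches SequenceNumbers.UNASSIGNED_SEQ_NO

    // reduced stand-in for ReplicationTracker.CheckpointState
    static final class Copy {
        final long localCheckpoint;
        final boolean inSync;
        Copy(long localCheckpoint, boolean inSync) { this.localCheckpoint = localCheckpoint; this.inSync = inSync; }
    }

    /** minimum persisted local checkpoint across in-sync copies; keep the old value if any is unknown */
    static long compute(Collection<Copy> copies, long fallback) {
        long min = Long.MAX_VALUE;
        boolean seen = false;
        for (Copy copy : copies) {
            if (copy.inSync == false) {
                continue; // out-of-sync copies don't hold the global checkpoint back
            }
            if (copy.localCheckpoint == UNASSIGNED_SEQ_NO) {
                return fallback; // an in-sync copy we know nothing about yet
            }
            min = Math.min(min, copy.localCheckpoint);
            seen = true;
        }
        return seen ? min : fallback;
    }

    public static void main(String[] args) {
        List<Copy> copies = List.of(new Copy(10, true), new Copy(7, true), new Copy(3, false));
        System.out.println(compute(copies, -1)); // -> 7: the slowest in-sync copy wins
    }
}
```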
*/ private boolean invariant() { - assert checkpoints.get(shardAllocationId) != null : - "checkpoints map should always have an entry for the current shard"; - // local checkpoints only set during primary mode assert primaryMode || checkpoints.values().stream().allMatch(lcps -> lcps.localCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO); - // global checkpoints for other shards only set during primary mode - assert primaryMode - || checkpoints - .entrySet() - .stream() - .filter(e -> e.getKey().equals(shardAllocationId) == false) - .map(Map.Entry::getValue) - .allMatch(cps -> cps.globalCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO); + // global checkpoints only set during primary mode + assert primaryMode || checkpoints.values().stream().allMatch(cps -> cps.globalCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO); // relocation handoff can only occur in primary mode assert !handoffInProgress || primaryMode; @@ -580,14 +577,14 @@ private boolean invariant() { // the computed global checkpoint is always up-to-date assert !primaryMode - || getGlobalCheckpoint() == computeGlobalCheckpoint(pendingInSync, checkpoints.values(), getGlobalCheckpoint()) + || globalCheckpoint == computeGlobalCheckpoint(pendingInSync, checkpoints.values(), globalCheckpoint) : "global checkpoint is not up-to-date, expected: " + - computeGlobalCheckpoint(pendingInSync, checkpoints.values(), getGlobalCheckpoint()) + " but was: " + getGlobalCheckpoint(); + computeGlobalCheckpoint(pendingInSync, checkpoints.values(), globalCheckpoint) + " but was: " + globalCheckpoint; // when in primary mode, the global checkpoint is at most the minimum local checkpoint on all in-sync shard copies assert !primaryMode - || getGlobalCheckpoint() <= inSyncCheckpointStates(checkpoints, CheckpointState::getLocalCheckpoint, LongStream::min) - : "global checkpoint [" + getGlobalCheckpoint() + "] " + || globalCheckpoint <= inSyncCheckpointStates(checkpoints, CheckpointState::getLocalCheckpoint, LongStream::min) + : "global checkpoint [" + globalCheckpoint + "] " + "for primary mode allocation ID [" + shardAllocationId + "] " + "more than in-sync local checkpoints [" + checkpoints + "]"; @@ -661,8 +658,8 @@ public ReplicationTracker( this.operationPrimaryTerm = operationPrimaryTerm; this.handoffInProgress = false; this.appliedClusterStateVersion = -1L; + this.globalCheckpoint = globalCheckpoint; this.checkpoints = new HashMap<>(1 + indexSettings.getNumberOfReplicas()); - checkpoints.put(allocationId, new CheckpointState(SequenceNumbers.UNASSIGNED_SEQ_NO, globalCheckpoint, false, false)); this.onGlobalCheckpointUpdated = Objects.requireNonNull(onGlobalCheckpointUpdated); this.currentTimeMillisSupplier = Objects.requireNonNull(currentTimeMillisSupplier); this.onSyncRetentionLeases = Objects.requireNonNull(onSyncRetentionLeases); @@ -689,28 +686,26 @@ private ReplicationGroup calculateReplicationGroup() { } /** - * Returns the global checkpoint for the shard. + * Returns the in-memory global checkpoint for the shard. * * @return the global checkpoint */ - public synchronized long getGlobalCheckpoint() { - final CheckpointState cps = checkpoints.get(shardAllocationId); - assert cps != null; - return cps.globalCheckpoint; + public long getGlobalCheckpoint() { + return globalCheckpoint; } @Override public long getAsLong() { - return getGlobalCheckpoint(); + return globalCheckpoint; } /** * Updates the global checkpoint on a replica shard after it has been updated by the primary. 
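The replica-side update shown in the next hunk only ever moves the new volatile globalCheckpoint field forward, because a lagging primary can sync a value older than what this copy already learned locally. The guard reduced to its essentials (a sketch, not the tracker itself):

```java
import java.util.function.LongConsumer;

final class ReplicaGlobalCheckpoint {
    private volatile long globalCheckpoint; // reads are lock-free; updates serialized below
    private final LongConsumer onGlobalCheckpointUpdated;

    ReplicaGlobalCheckpoint(long initial, LongConsumer onUpdated) {
        this.globalCheckpoint = initial;
        this.onGlobalCheckpointUpdated = onUpdated;
    }

    long get() {
        return globalCheckpoint;
    }

    /** apply a value from the primary, ignoring anything stale from a lagging primary */
    synchronized void updateFromPrimary(long newGlobalCheckpoint) {
        if (newGlobalCheckpoint > globalCheckpoint) {
            globalCheckpoint = newGlobalCheckpoint;
            onGlobalCheckpointUpdated.accept(newGlobalCheckpoint);
        }
    }
}
```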
* - * @param globalCheckpoint the global checkpoint - * @param reason the reason the global checkpoint was updated + * @param newGlobalCheckpoint the new global checkpoint + * @param reason the reason the global checkpoint was updated */ - public synchronized void updateGlobalCheckpointOnReplica(final long globalCheckpoint, final String reason) { + public synchronized void updateGlobalCheckpointOnReplica(final long newGlobalCheckpoint, final String reason) { assert invariant(); assert primaryMode == false; /* @@ -719,18 +714,17 @@ public synchronized void updateGlobalCheckpointOnReplica(final long globalCheckp * replica shards). In these cases, the local knowledge of the global checkpoint could be higher than the sync from the lagging * primary. */ - updateGlobalCheckpoint( - shardAllocationId, - globalCheckpoint, - current -> { - logger.trace("updated global checkpoint from [{}] to [{}] due to [{}]", current, globalCheckpoint, reason); - onGlobalCheckpointUpdated.accept(globalCheckpoint); - }); + final long previousGlobalCheckpoint = globalCheckpoint; + if (newGlobalCheckpoint > previousGlobalCheckpoint) { + globalCheckpoint = newGlobalCheckpoint; + logger.trace("updated global checkpoint from [{}] to [{}] due to [{}]", previousGlobalCheckpoint, globalCheckpoint, reason); + onGlobalCheckpointUpdated.accept(globalCheckpoint); + } assert invariant(); } /** - * Update the local knowledge of the global checkpoint for the specified allocation ID. + * Update the local knowledge of the persisted global checkpoint for the specified allocation ID. * * @param allocationId the allocation ID to update the global checkpoint for * @param globalCheckpoint the global checkpoint @@ -739,24 +733,15 @@ public synchronized void updateGlobalCheckpointForShard(final String allocationI assert primaryMode; assert handoffInProgress == false; assert invariant(); - updateGlobalCheckpoint( - allocationId, - globalCheckpoint, - current -> logger.trace( - "updated local knowledge for [{}] on the primary of the global checkpoint from [{}] to [{}]", - allocationId, - current, - globalCheckpoint)); - assert invariant(); - } - - private void updateGlobalCheckpoint(final String allocationId, final long globalCheckpoint, LongConsumer ifUpdated) { final CheckpointState cps = checkpoints.get(allocationId); assert !this.shardAllocationId.equals(allocationId) || cps != null; if (cps != null && globalCheckpoint > cps.globalCheckpoint) { + final long previousGlobalCheckpoint = cps.globalCheckpoint; cps.globalCheckpoint = globalCheckpoint; - ifUpdated.accept(cps.globalCheckpoint); + logger.trace("updated local knowledge for [{}] on the primary of the global checkpoint from [{}] to [{}]", + allocationId, previousGlobalCheckpoint, globalCheckpoint); } + assert invariant(); } /** @@ -814,23 +799,14 @@ public synchronized void updateFromMaster(final long applyingClusterStateVersion } } else { for (String initializingId : initializingAllocationIds) { - if (shardAllocationId.equals(initializingId) == false) { - final long localCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; - final long globalCheckpoint = localCheckpoint; - checkpoints.put(initializingId, new CheckpointState(localCheckpoint, globalCheckpoint, false, false)); - } + final long localCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; + final long globalCheckpoint = localCheckpoint; + checkpoints.put(initializingId, new CheckpointState(localCheckpoint, globalCheckpoint, false, false)); } for (String inSyncId : inSyncAllocationIds) { - if (shardAllocationId.equals(inSyncId)) { - 
// current shard is initially marked as not in-sync because we don't know better at that point - CheckpointState checkpointState = checkpoints.get(shardAllocationId); - checkpointState.inSync = true; - checkpointState.tracked = true; - } else { - final long localCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; - final long globalCheckpoint = localCheckpoint; - checkpoints.put(inSyncId, new CheckpointState(localCheckpoint, globalCheckpoint, true, true)); - } + final long localCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; + final long globalCheckpoint = localCheckpoint; + checkpoints.put(inSyncId, new CheckpointState(localCheckpoint, globalCheckpoint, true, true)); } } appliedClusterStateVersion = applyingClusterStateVersion; @@ -990,13 +966,11 @@ private static long computeGlobalCheckpoint(final Set pendingInSync, fin */ private synchronized void updateGlobalCheckpointOnPrimary() { assert primaryMode; - final CheckpointState cps = checkpoints.get(shardAllocationId); - final long globalCheckpoint = cps.globalCheckpoint; final long computedGlobalCheckpoint = computeGlobalCheckpoint(pendingInSync, checkpoints.values(), getGlobalCheckpoint()); assert computedGlobalCheckpoint >= globalCheckpoint : "new global checkpoint [" + computedGlobalCheckpoint + "] is lower than previous one [" + globalCheckpoint + "]"; if (globalCheckpoint != computedGlobalCheckpoint) { - cps.globalCheckpoint = computedGlobalCheckpoint; + globalCheckpoint = computedGlobalCheckpoint; logger.trace("updated global checkpoint to [{}]", computedGlobalCheckpoint); onGlobalCheckpointUpdated.accept(computedGlobalCheckpoint); } @@ -1046,13 +1020,10 @@ public synchronized void completeRelocationHandoff() { primaryMode = false; handoffInProgress = false; relocated = true; - // forget all checkpoint information except for global checkpoint of current shard + // forget all checkpoint information checkpoints.forEach((key, cps) -> { cps.localCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; - if (key.equals(shardAllocationId) == false) { - // don't throw global checkpoint information of current shard away - cps.globalCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; - } + cps.globalCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; }); assert invariant(); } diff --git a/server/src/main/java/org/elasticsearch/index/seqno/SeqNoStats.java b/server/src/main/java/org/elasticsearch/index/seqno/SeqNoStats.java index a56f8670c23b1..e1b992643fac8 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/SeqNoStats.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/SeqNoStats.java @@ -58,7 +58,7 @@ public long getMaxSeqNo() { return maxSeqNo; } - /** the maximum sequence number for which all previous operations (including) have been completed */ + /** the maximum sequence number for which all previous operations (including) have been persisted */ public long getLocalCheckpoint() { return localCheckpoint; } diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index fdd95614756b7..9f7e51f0da0cd 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -540,7 +540,8 @@ public void updateShardState(final ShardRouting newRouting, */ engine.rollTranslogGeneration(); engine.fillSeqNoGaps(newPrimaryTerm); - replicationTracker.updateLocalCheckpoint(currentRouting.allocationId().getId(), getLocalCheckpoint()); + 
replicationTracker.updateLocalCheckpoint(currentRouting.allocationId().getId(), + getLocalCheckpoint()); primaryReplicaSyncer.accept(this, new ActionListener<ResyncTask>() { @Override public void onResponse(ResyncTask resyncTask) { @@ -1865,7 +1866,7 @@ public void updateLocalCheckpointForShard(final String allocationId, final long } /** - * Update the local knowledge of the global checkpoint for the specified allocation ID. + * Update the local knowledge of the persisted global checkpoint for the specified allocation ID. * * @param allocationId the allocation ID to update the global checkpoint for * @param globalCheckpoint the global checkpoint @@ -2079,12 +2080,12 @@ public void markAllocationIdAsInSync(final String allocationId, final long local } /** - * Returns the local checkpoint for the shard. + * Returns the persisted local checkpoint for the shard. * * @return the local checkpoint */ public long getLocalCheckpoint() { - return getEngine().getLocalCheckpoint(); + return getEngine().getPersistedLocalCheckpoint(); } /** @@ -2092,7 +2093,7 @@ public long getLocalCheckpoint() { * * @return the global checkpoint */ - public long getGlobalCheckpoint() { + public long getLastKnownGlobalCheckpoint() { return replicationTracker.getGlobalCheckpoint(); } @@ -2125,15 +2126,19 @@ public void maybeSyncGlobalCheckpoint(final String reason) { return; } assert assertPrimaryMode(); - // only sync if there are not operations in flight + // only sync if there are no operations in flight, or when using async durability final SeqNoStats stats = getEngine().getSeqNoStats(replicationTracker.getGlobalCheckpoint()); - if (stats.getMaxSeqNo() == stats.getGlobalCheckpoint()) { + final boolean asyncDurability = indexSettings().getTranslogDurability() == Translog.Durability.ASYNC; + if (stats.getMaxSeqNo() == stats.getGlobalCheckpoint() || asyncDurability) { final ObjectLongMap<String> globalCheckpoints = getInSyncGlobalCheckpoints(); - final String allocationId = routingEntry().allocationId().getId(); - assert globalCheckpoints.containsKey(allocationId); - final long globalCheckpoint = globalCheckpoints.get(allocationId); + final long globalCheckpoint = replicationTracker.getGlobalCheckpoint(); + // async durability means that the local checkpoint might lag (as it is only advanced on fsync); + // periodically ask for the newest local checkpoint by syncing the global checkpoint, so that ultimately the global + // checkpoint can be synced. final boolean syncNeeded = - StreamSupport + (asyncDurability && stats.getGlobalCheckpoint() < stats.getMaxSeqNo()) + // check if the persisted global checkpoint on any shard copy lags behind the primary's in-memory one + || StreamSupport .stream(globalCheckpoints.values().spliterator(), false) .anyMatch(v -> v.value < globalCheckpoint); // only sync if index is not closed and there is a shard lagging the primary @@ -2192,7 +2197,8 @@ public void activateWithPrimaryContext(final ReplicationTracker.PrimaryContext p assert shardRouting.primary() && shardRouting.isRelocationTarget() : "only primary relocation target can update allocation IDs from primary context: " + shardRouting; assert primaryContext.getCheckpointStates().containsKey(routingEntry().allocationId().getId()) && - getLocalCheckpoint() == primaryContext.getCheckpointStates().get(routingEntry().allocationId().getId()).getLocalCheckpoint(); + getLocalCheckpoint() == primaryContext.getCheckpointStates().get(routingEntry().allocationId().getId()) + .getLocalCheckpoint(); synchronized (mutex) { replicationTracker.activateWithPrimaryContext(primaryContext); // make changes to primaryMode flag only
under mutex } @@ -2733,7 +2739,7 @@ private void innerAcquireReplicaOperationPermit(final long opPrimaryTerm, bumpPrimaryTerm(opPrimaryTerm, () -> { updateGlobalCheckpointOnReplica(globalCheckpoint, "primary term transition"); - final long currentGlobalCheckpoint = getGlobalCheckpoint(); + final long currentGlobalCheckpoint = getLastKnownGlobalCheckpoint(); final long maxSeqNo = seqNoStats().getMaxSeqNo(); logger.info("detected new primary with primary term [{}], global checkpoint [{}], max_seq_no [{}]", opPrimaryTerm, currentGlobalCheckpoint, maxSeqNo); @@ -3103,7 +3109,8 @@ assert getActiveOperationsCount() == OPERATIONS_BLOCKED flush(new FlushRequest().waitIfOngoing(true)); SetOnce<Engine> newEngineReference = new SetOnce<>(); - final long globalCheckpoint = getGlobalCheckpoint(); + final long globalCheckpoint = getLastKnownGlobalCheckpoint(); + assert globalCheckpoint == getLastSyncedGlobalCheckpoint(); synchronized (mutex) { verifyNotClosed(); // we must create both new read-only engine and new read-write engine under mutex to ensure snapshotStoreMetadata, diff --git a/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java b/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java index 07aade952923b..17ef424185d1f 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java +++ b/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java @@ -84,7 +84,7 @@ void setChunkSize(ByteSizeValue chunkSize) { // only settable for tests public void resync(final IndexShard indexShard, final ActionListener<ResyncTask> listener) { Translog.Snapshot snapshot = null; try { - final long startingSeqNo = indexShard.getGlobalCheckpoint() + 1; + final long startingSeqNo = indexShard.getLastKnownGlobalCheckpoint() + 1; final long maxSeqNo = indexShard.seqNoStats().getMaxSeqNo(); final ShardId shardId = indexShard.shardId(); // Wrap translog snapshot to make it synchronized as it is accessed by different threads through SnapshotSender. diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java index 7626270b6cdc5..82ad2046c510b 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -63,6 +63,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.LongConsumer; import java.util.function.LongSupplier; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -129,6 +130,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC private final LongSupplier primaryTermSupplier; private final String translogUUID; private final TranslogDeletionPolicy deletionPolicy; + private final LongConsumer persistedSequenceNumberConsumer; /** * Creates a new Translog instance. This method will create a new transaction log unless the given {@link TranslogGeneration} is @@ -146,14 +148,18 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC * examined and stored in the header whenever a new generation is rolled. It's guaranteed from outside * that a new generation is rolled when the term is increased. This guarantee allows us to validate * and reject operations whose term is higher than the primary term stored in the translog header.
+ * @param persistedSequenceNumberConsumer a callback that's called whenever an operation with a given sequence number is successfully + * persisted. */ public Translog( final TranslogConfig config, final String translogUUID, TranslogDeletionPolicy deletionPolicy, - final LongSupplier globalCheckpointSupplier, final LongSupplier primaryTermSupplier) throws IOException { + final LongSupplier globalCheckpointSupplier, final LongSupplier primaryTermSupplier, + final LongConsumer persistedSequenceNumberConsumer) throws IOException { super(config.getShardId(), config.getIndexSettings()); this.config = config; this.globalCheckpointSupplier = globalCheckpointSupplier; this.primaryTermSupplier = primaryTermSupplier; + this.persistedSequenceNumberConsumer = persistedSequenceNumberConsumer; this.deletionPolicy = deletionPolicy; this.translogUUID = translogUUID; bigArrays = config.getBigArrays(); @@ -190,7 +196,8 @@ public Translog( boolean success = false; current = null; try { - current = createWriter(checkpoint.generation + 1, getMinFileGeneration(), checkpoint.globalCheckpoint); + current = createWriter(checkpoint.generation + 1, getMinFileGeneration(), checkpoint.globalCheckpoint, + persistedSequenceNumberConsumer); success = true; } finally { // we have to close all the recovered ones otherwise we leak file handles here @@ -471,7 +478,8 @@ public long sizeInBytesByMinGen(long minGeneration) { * @throws IOException if creating the translog failed */ TranslogWriter createWriter(long fileGeneration) throws IOException { - final TranslogWriter writer = createWriter(fileGeneration, getMinFileGeneration(), globalCheckpointSupplier.getAsLong()); + final TranslogWriter writer = createWriter(fileGeneration, getMinFileGeneration(), globalCheckpointSupplier.getAsLong(), + persistedSequenceNumberConsumer); assert writer.sizeInBytes() == DEFAULT_HEADER_SIZE_IN_BYTES : "Mismatch translog header size; " + "empty translog size [" + writer.sizeInBytes() + ", header size [" + DEFAULT_HEADER_SIZE_IN_BYTES + "]"; return writer; @@ -486,7 +494,8 @@ TranslogWriter createWriter(long fileGeneration) throws IOException { * With no readers and no current, a call to {@link #getMinFileGeneration()} would not work. * @param initialGlobalCheckpoint the global checkpoint to be written in the first checkpoint. 
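The new constructor parameter gives the translog a channel to report durability: whoever creates the Translog hands in a LongConsumer and is told each seq_no whose bytes have been fsynced. Only the parameter itself comes from this diff; wiring it to LocalCheckpointTracker#markSeqNoAsPersisted, as sketched here, is an assumption about the intended caller:

```java
import java.util.function.LongConsumer;
import org.elasticsearch.index.seqno.LocalCheckpointTracker;
import org.elasticsearch.index.seqno.SequenceNumbers;

public class PersistedWiring {
    public static void main(String[] args) {
        LocalCheckpointTracker tracker =
            new LocalCheckpointTracker(SequenceNumbers.NO_OPS_PERFORMED, SequenceNumbers.NO_OPS_PERFORMED);

        // the callback a caller would hand to the Translog constructor above; it is
        // invoked once per operation whose bytes have been fsynced (see TranslogWriter below)
        LongConsumer persistedSequenceNumberConsumer = tracker::markSeqNoAsPersisted;

        // simulate the translog reporting seq_nos 0 and 1 as durable
        persistedSequenceNumberConsumer.accept(0);
        persistedSequenceNumberConsumer.accept(1);
        System.out.println(tracker.getPersistedCheckpoint()); // -> 1
    }
}
```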
*/ - TranslogWriter createWriter(long fileGeneration, long initialMinTranslogGen, long initialGlobalCheckpoint) throws IOException { + TranslogWriter createWriter(long fileGeneration, long initialMinTranslogGen, long initialGlobalCheckpoint, + LongConsumer persistedSequenceNumberConsumer) throws IOException { final TranslogWriter newFile; try { newFile = TranslogWriter.create( @@ -497,7 +506,8 @@ TranslogWriter createWriter(long fileGeneration, long initialMinTranslogGen, lon getChannelFactory(), config.getBufferSize(), initialMinTranslogGen, initialGlobalCheckpoint, - globalCheckpointSupplier, this::getMinFileGeneration, primaryTermSupplier.getAsLong(), tragedy); + globalCheckpointSupplier, this::getMinFileGeneration, primaryTermSupplier.getAsLong(), tragedy, + persistedSequenceNumberConsumer); } catch (final IOException e) { throw new TranslogException(shardId, "failed to create new translog file", e); } @@ -1868,7 +1878,7 @@ static String createEmptyTranslog(Path location, long initialGlobalCheckpoint, S location.resolve(getFilename(1)), channelFactory, new ByteSizeValue(10), 1, initialGlobalCheckpoint, () -> { throw new UnsupportedOperationException(); }, () -> { throw new UnsupportedOperationException(); }, primaryTerm, - new TragicExceptionHolder()); + new TragicExceptionHolder(), seqNo -> { throw new UnsupportedOperationException(); }); writer.close(); return translogUUID; } diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java index 6b00b0c5db3ff..0695a2bf65010 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java @@ -19,6 +19,8 @@ package org.elasticsearch.index.translog; +import com.carrotsearch.hppc.LongArrayList; +import com.carrotsearch.hppc.procedures.LongProcedure; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.Assertions; @@ -42,6 +44,7 @@ import java.util.Map; import java.util.Objects; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.LongConsumer; import java.util.function.LongSupplier; public class TranslogWriter extends BaseTranslogReader implements Closeable { @@ -64,10 +67,15 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { private final LongSupplier globalCheckpointSupplier; private final LongSupplier minTranslogGenerationSupplier; + // callback that's called whenever an operation with a given sequence number is successfully persisted. 
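The comment above states the contract; the following hunks implement it by buffering the seq_nos of written-but-not-yet-fsynced operations and swapping that buffer out when syncUpTo actually fsyncs. The pattern in miniature, collapsing the writer's two locks into one plain monitor (an approximation, not the writer itself):

```java
import java.util.ArrayList;
import java.util.List;
import java.util.function.LongConsumer;

final class FsyncNotifier {
    private final LongConsumer persistedSeqNoConsumer;
    private List<Long> nonFsynced = new ArrayList<>(); // guarded by this

    FsyncNotifier(LongConsumer consumer) {
        this.persistedSeqNoConsumer = consumer;
    }

    synchronized void add(long seqNo) {
        nonFsynced.add(seqNo); // written to the OS buffer, not yet durable
    }

    void sync() {
        List<Long> flushed;
        synchronized (this) {
            // swap the buffer before fsyncing so concurrent writes land in the new list
            flushed = nonFsynced;
            nonFsynced = new ArrayList<>();
            // ... flush and fsync the channel here; on failure the real writer
            // closes with a tragic event instead of notifying ...
        }
        // notify only once the data is known to be durable
        flushed.forEach(seqNo -> persistedSeqNoConsumer.accept(seqNo));
    }
}
```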
+ private final LongConsumer persistedSequenceNumberConsumer; + protected final AtomicBoolean closed = new AtomicBoolean(false); // lock order synchronized(syncLock) -> synchronized(this) private final Object syncLock = new Object(); + private LongArrayList nonFsyncedSequenceNumbers; + private final Map> seenSequenceNumbers; private TranslogWriter( @@ -78,7 +86,8 @@ private TranslogWriter( final Path path, final ByteSizeValue bufferSize, final LongSupplier globalCheckpointSupplier, LongSupplier minTranslogGenerationSupplier, TranslogHeader header, - TragicExceptionHolder tragedy) + TragicExceptionHolder tragedy, + final LongConsumer persistedSequenceNumberConsumer) throws IOException { super(initialCheckpoint.generation, channel, path, header); @@ -97,6 +106,8 @@ private TranslogWriter( this.maxSeqNo = initialCheckpoint.maxSeqNo; assert initialCheckpoint.trimmedAboveSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO : initialCheckpoint.trimmedAboveSeqNo; this.globalCheckpointSupplier = globalCheckpointSupplier; + this.nonFsyncedSequenceNumbers = new LongArrayList(64); + this.persistedSequenceNumberConsumer = persistedSequenceNumberConsumer; this.seenSequenceNumbers = Assertions.ENABLED ? new HashMap<>() : null; this.tragedy = tragedy; } @@ -104,7 +115,7 @@ private TranslogWriter( public static TranslogWriter create(ShardId shardId, String translogUUID, long fileGeneration, Path file, ChannelFactory channelFactory, ByteSizeValue bufferSize, final long initialMinTranslogGen, long initialGlobalCheckpoint, final LongSupplier globalCheckpointSupplier, final LongSupplier minTranslogGenerationSupplier, - final long primaryTerm, TragicExceptionHolder tragedy) + final long primaryTerm, TragicExceptionHolder tragedy, LongConsumer persistedSequenceNumberConsumer) throws IOException { final FileChannel channel = channelFactory.open(file); try { @@ -125,7 +136,7 @@ public static TranslogWriter create(ShardId shardId, String translogUUID, long f writerGlobalCheckpointSupplier = globalCheckpointSupplier; } return new TranslogWriter(channelFactory, shardId, checkpoint, channel, file, bufferSize, - writerGlobalCheckpointSupplier, minTranslogGenerationSupplier, header, tragedy); + writerGlobalCheckpointSupplier, minTranslogGenerationSupplier, header, tragedy, persistedSequenceNumberConsumer); } catch (Exception exception) { // if we fail to bake the file-generation into the checkpoint we stick with the file and once we recover and that // file exists we remove it. 
We only apply this logic to the checkpoint.generation+1 any other file with a higher generation @@ -177,6 +188,8 @@ public synchronized Translog.Location add(final BytesReference data, final long minSeqNo = SequenceNumbers.min(minSeqNo, seqNo); maxSeqNo = SequenceNumbers.max(maxSeqNo, seqNo); + nonFsyncedSequenceNumbers.add(seqNo); + operationCounter++; assert assertNoSeqNumberConflict(seqNo, data); @@ -338,7 +351,9 @@ private long getWrittenOffset() throws IOException { * @return true if this call caused an actual sync operation */ public boolean syncUpTo(long offset) throws IOException { + boolean synced = false; if (lastSyncedCheckpoint.offset < offset && syncNeeded()) { + LongArrayList flushedSequenceNumbers = null; synchronized (syncLock) { // only one sync/checkpoint should happen concurrently but we wait if (lastSyncedCheckpoint.offset < offset && syncNeeded()) { // double checked locking - we don't want to fsync unless we have to and now that we have @@ -349,6 +364,8 @@ public boolean syncUpTo(long offset) throws IOException { try { outputStream.flush(); checkpointToSync = getCheckpoint(); + flushedSequenceNumbers = nonFsyncedSequenceNumbers; + nonFsyncedSequenceNumbers = new LongArrayList(64); } catch (final Exception ex) { closeWithTragicEvent(ex); throw ex; @@ -366,11 +383,14 @@ public boolean syncUpTo(long offset) throws IOException { assert lastSyncedCheckpoint.offset <= checkpointToSync.offset : "illegal state: " + lastSyncedCheckpoint.offset + " <= " + checkpointToSync.offset; lastSyncedCheckpoint = checkpointToSync; // write protected by syncLock - return true; + synced = true; } } + if (flushedSequenceNumbers != null) { + flushedSequenceNumbers.forEach((LongProcedure) persistedSequenceNumberConsumer::accept); + } } - return false; + return synced; } @Override diff --git a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java index 01a7836d81358..7cf165a5b112d 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java @@ -181,7 +181,7 @@ private boolean isTranslogClean(ShardPath shardPath, String translogUUID) throws new TranslogDeletionPolicy(indexSettings.getTranslogRetentionSize().getBytes(), indexSettings.getTranslogRetentionAge().getMillis()); try (Translog translog = new Translog(translogConfig, translogUUID, - translogDeletionPolicy, () -> translogGlobalCheckpoint, () -> primaryTerm); + translogDeletionPolicy, () -> translogGlobalCheckpoint, () -> primaryTerm, seqNo -> {}); Translog.Snapshot snapshot = translog.newSnapshot()) { //noinspection StatementWithEmptyBody we are just checking that we can iterate through the whole snapshot while (snapshot.next() != null) { diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 4b89e75691a76..fdada82c5bc56 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -175,7 +175,7 @@ public void recoverToTarget(ActionListener listener) { startingSeqNo = 0; try { final int estimateNumOps = shard.estimateNumberOfHistoryOperations("peer-recovery", startingSeqNo); - sendFileResult = phase1(phase1Snapshot.getIndexCommit(), 
shard.getGlobalCheckpoint(), () -> estimateNumOps); + sendFileResult = phase1(phase1Snapshot.getIndexCommit(), shard.getLastKnownGlobalCheckpoint(), () -> estimateNumOps); } catch (final Exception e) { throw new RecoveryEngineException(shard.shardId(), 1, "phase1 failed", e); } finally { @@ -641,7 +641,7 @@ void finalizeRecovery(final long targetLocalCheckpoint, final ActionListener shard.markAllocationIdAsInSync(request.targetAllocationId(), targetLocalCheckpoint), shardId + " marking " + request.targetAllocationId() + " as in sync", shard, cancellableThreads, logger); - final long globalCheckpoint = shard.getGlobalCheckpoint(); + final long globalCheckpoint = shard.getLastKnownGlobalCheckpoint(); // this global checkpoint is persisted in finalizeRecovery final StepListener finalizeListener = new StepListener<>(); cancellableThreads.executeIO(() -> recoveryTarget.finalizeRecovery(globalCheckpoint, finalizeListener)); finalizeListener.whenComplete(r -> { @@ -709,7 +709,8 @@ void sendFiles(Store store, StoreFileMetaData[] files, Supplier translo final BytesArray content = new BytesArray(buffer, 0, bytesRead); final boolean lastChunk = position + content.length() == md.length(); final long requestSeqId = requestSeqIdTracker.generateSeqNo(); - cancellableThreads.execute(() -> requestSeqIdTracker.waitForOpsToComplete(requestSeqId - maxConcurrentFileChunks)); + cancellableThreads.execute( + () -> requestSeqIdTracker.waitForProcessedOpsToComplete(requestSeqId - maxConcurrentFileChunks)); cancellableThreads.checkForCancel(); if (error.get() != null) { break; @@ -718,10 +719,10 @@ void sendFiles(Store store, StoreFileMetaData[] files, Supplier translo cancellableThreads.executeIO(() -> recoveryTarget.writeFileChunk(md, requestFilePosition, content, lastChunk, translogOps.get(), ActionListener.wrap( - r -> requestSeqIdTracker.markSeqNoAsCompleted(requestSeqId), + r -> requestSeqIdTracker.markSeqNoAsProcessed(requestSeqId), e -> { error.compareAndSet(null, Tuple.tuple(md, e)); - requestSeqIdTracker.markSeqNoAsCompleted(requestSeqId); + requestSeqIdTracker.markSeqNoAsProcessed(requestSeqId); } ))); position += content.length(); @@ -734,7 +735,7 @@ void sendFiles(Store store, StoreFileMetaData[] files, Supplier translo // When we terminate exceptionally, we don't wait for the outstanding requests as we don't use their results anyway. // This allows us to end quickly and eliminate the complexity of handling requestSeqIds in case of error. if (error.get() == null) { - cancellableThreads.execute(() -> requestSeqIdTracker.waitForOpsToComplete(requestSeqIdTracker.getMaxSeqNo())); + cancellableThreads.execute(() -> requestSeqIdTracker.waitForProcessedOpsToComplete(requestSeqIdTracker.getMaxSeqNo())); } if (error.get() != null) { handleErrorOnSendFiles(store, error.get().v1(), error.get().v2()); diff --git a/server/src/main/java/org/elasticsearch/monitor/os/OsProbe.java b/server/src/main/java/org/elasticsearch/monitor/os/OsProbe.java index 18173dd275a46..320bc15fda1f4 100644 --- a/server/src/main/java/org/elasticsearch/monitor/os/OsProbe.java +++ b/server/src/main/java/org/elasticsearch/monitor/os/OsProbe.java @@ -42,6 +42,24 @@ import java.util.regex.Pattern; import java.util.stream.Collectors; +/** + * The {@link OsProbe} class retrieves information about the physical and swap size of the machine + * memory, as well as the system load average and cpu load. 
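+ * + * Both values are read by reflectively invoking optional {@code com.sun.management} methods on the + * {@code OperatingSystemMXBean}; the cached {@code Method} handles used below are null on JVMs that do + * not expose these accessors.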
+ * + * In some exceptional cases, it's possible the underlying native method used by + * {@link #getFreePhysicalMemorySize()} and {@link #getTotalPhysicalMemorySize()} can return a + * negative value. Because of this, we prevent those methods from returning negative values, + * returning 0 instead. + * + * The OS can report a negative number in a number of cases: + * - Non-supported OSes (HP-UX or AIX) + * - A failure of macOS to initialize host statistics + * - An OS that does not support the {@code _SC_PHYS_PAGES} or {@code _SC_PAGE_SIZE} flags for the {@code sysconf()} Linux kernel call + * - An overflow of the product of {@code _SC_PHYS_PAGES} and {@code _SC_PAGE_SIZE} + * - An error case retrieving these values from a Linux kernel + * - A non-standard libc implementation not implementing the required values + * For a more exhaustive explanation, see https://github.com/elastic/elasticsearch/pull/42725 + */ public class OsProbe { private static final OperatingSystemMXBean osMxBean = ManagementFactory.getOperatingSystemMXBean(); @@ -67,12 +85,19 @@ public class OsProbe { */ public long getFreePhysicalMemorySize() { if (getFreePhysicalMemorySize == null) { - return -1; + logger.warn("getFreePhysicalMemorySize is not available"); + return 0; } try { - return (long) getFreePhysicalMemorySize.invoke(osMxBean); + final long freeMem = (long) getFreePhysicalMemorySize.invoke(osMxBean); + if (freeMem < 0) { + logger.warn("OS reported a negative free memory value [{}]", freeMem); + return 0; + } + return freeMem; } catch (Exception e) { - return -1; + logger.warn("exception retrieving free physical memory", e); + return 0; } } @@ -81,12 +106,19 @@ public long getFreePhysicalMemorySize() { */ public long getTotalPhysicalMemorySize() { if (getTotalPhysicalMemorySize == null) { - return -1; + logger.warn("getTotalPhysicalMemorySize is not available"); + return 0; } try { - return (long) getTotalPhysicalMemorySize.invoke(osMxBean); + final long totalMem = (long) getTotalPhysicalMemorySize.invoke(osMxBean); + if (totalMem < 0) { + logger.warn("OS reported a negative total memory value [{}]", totalMem); + return 0; + } + return totalMem; } catch (Exception e) { - return -1; + logger.warn("exception retrieving total physical memory", e); + return 0; } } diff --git a/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java b/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java index 86047281a22fb..9dbd9e4365a53 100644 --- a/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java +++ b/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java @@ -228,13 +228,17 @@ public static class Mem implements Writeable, ToXContentFragment { private final long free; public Mem(long total, long free) { + assert total >= 0 : "expected total memory to be non-negative, got: " + total; + assert free >= 0 : "expected free memory to be non-negative, got: " + free; this.total = total; this.free = free; } public Mem(StreamInput in) throws IOException { this.total = in.readLong(); + assert total >= 0 : "expected total memory to be non-negative, got: " + total; this.free = in.readLong(); + assert free >= 0 : "expected free memory to be non-negative, got: " + free; } @Override
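The same clamp-and-warn shape applies to both accessors, so it generalizes. A compact, hypothetical sketch (PhysicalMemoryProbe and readOrZero are invented names; this is an illustration, not the actual OsProbe implementation):

import java.lang.management.ManagementFactory;
import java.lang.management.OperatingSystemMXBean;
import java.lang.reflect.Method;

// Hypothetical helper: resolve the com.sun.management accessor reflectively and
// clamp any failure or negative reading to 0, mirroring the guards above.
final class PhysicalMemoryProbe {
    private static final OperatingSystemMXBean OS_BEAN = ManagementFactory.getOperatingSystemMXBean();

    static long readOrZero(String methodName) {
        final Method method;
        try {
            method = Class.forName("com.sun.management.OperatingSystemMXBean").getMethod(methodName);
        } catch (Exception e) {
            return 0; // accessor not available on this JVM
        }
        try {
            final long value = (long) method.invoke(OS_BEAN);
            return value < 0 ? 0 : value; // the OS may report a negative size; never propagate it
        } catch (Exception e) {
            return 0; // reflective invocation failed
        }
    }
}

Calling readOrZero("getFreePhysicalMemorySize") then yields the free memory or 0, never a negative value, which is exactly the invariant the new OsStats.Mem assertions rely on.

diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java index 32695ac69a88e..5e5e60760d790 100644 ---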
a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java @@ -40,6 +40,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Objects; /** * A specialization of {@link DeferringBucketCollector} that collects all @@ -48,28 +49,28 @@ * this collector. */ public class BestBucketsDeferringCollector extends DeferringBucketCollector { - private static class Entry { + static class Entry { final LeafReaderContext context; final PackedLongValues docDeltas; final PackedLongValues buckets; Entry(LeafReaderContext context, PackedLongValues docDeltas, PackedLongValues buckets) { - this.context = context; - this.docDeltas = docDeltas; - this.buckets = buckets; + this.context = Objects.requireNonNull(context); + this.docDeltas = Objects.requireNonNull(docDeltas); + this.buckets = Objects.requireNonNull(buckets); } } - final List entries = new ArrayList<>(); - BucketCollector collector; - final SearchContext searchContext; - final boolean isGlobal; - LeafReaderContext context; - PackedLongValues.Builder docDeltas; - PackedLongValues.Builder buckets; - long maxBucket = -1; - boolean finished = false; - LongHash selectedBuckets; + protected List entries = new ArrayList<>(); + protected BucketCollector collector; + protected final SearchContext searchContext; + protected final boolean isGlobal; + protected LeafReaderContext context; + protected PackedLongValues.Builder docDeltasBuilder; + protected PackedLongValues.Builder bucketsBuilder; + protected long maxBucket = -1; + protected boolean finished = false; + protected LongHash selectedBuckets; /** * Sole constructor. @@ -97,28 +98,32 @@ public void setDeferredCollector(Iterable deferredCollectors) { private void finishLeaf() { if (context != null) { - entries.add(new Entry(context, docDeltas.build(), buckets.build())); + assert docDeltasBuilder != null && bucketsBuilder != null; + entries.add(new Entry(context, docDeltasBuilder.build(), bucketsBuilder.build())); } - context = null; - docDeltas = null; - buckets = null; } @Override public LeafBucketCollector getLeafCollector(LeafReaderContext ctx) throws IOException { finishLeaf(); - context = ctx; - docDeltas = PackedLongValues.packedBuilder(PackedInts.DEFAULT); - buckets = PackedLongValues.packedBuilder(PackedInts.DEFAULT); + context = null; + // allocates the builder lazily in case this segment doesn't contain any match + docDeltasBuilder = null; + bucketsBuilder = null; return new LeafBucketCollector() { int lastDoc = 0; @Override public void collect(int doc, long bucket) throws IOException { - docDeltas.add(doc - lastDoc); - buckets.add(bucket); + if (context == null) { + context = ctx; + docDeltasBuilder = PackedLongValues.packedBuilder(PackedInts.DEFAULT); + bucketsBuilder = PackedLongValues.packedBuilder(PackedInts.DEFAULT); + } + docDeltasBuilder.add(doc - lastDoc); + bucketsBuilder.add(bucket); lastDoc = doc; maxBucket = Math.max(maxBucket, bucket); } @@ -141,7 +146,7 @@ public void postCollection() throws IOException { */ @Override public void prepareSelectedBuckets(long... selectedBuckets) throws IOException { - if (!finished) { + if (finished == false) { throw new IllegalStateException("Cannot replay yet, collection is not finished: postCollect() has not been called"); } if (this.selectedBuckets != null) { @@ -160,14 +165,16 @@ public void prepareSelectedBuckets(long... 
selectedBuckets) throws IOException { Query query = isGlobal ? new MatchAllDocsQuery() : searchContext.query(); weight = searchContext.searcher().createWeight(searchContext.searcher().rewrite(query), ScoreMode.COMPLETE, 1f); } + for (Entry entry : entries) { + assert entry.docDeltas.size() > 0 : "segment should have at least one document to replay, got 0"; final LeafBucketCollector leafCollector = collector.getLeafCollector(entry.context); - DocIdSetIterator docIt = null; - if (needsScores && entry.docDeltas.size() > 0) { + DocIdSetIterator scoreIt = null; + if (needsScores) { Scorer scorer = weight.scorer(entry.context); // We don't need to check if the scorer is null // since we are sure that there are documents to replay (entry.docDeltas is not empty). - docIt = scorer.iterator(); + scoreIt = scorer.iterator(); leafCollector.setScorer(scorer); } final PackedLongValues.Iterator docDeltaIterator = entry.docDeltas.iterator(); @@ -179,17 +186,16 @@ public void prepareSelectedBuckets(long... selectedBuckets) throws IOException { final long rebasedBucket = hash.find(bucket); if (rebasedBucket != -1) { if (needsScores) { - if (docIt.docID() < doc) { - docIt.advance(doc); + if (scoreIt.docID() < doc) { + scoreIt.advance(doc); } // aggregations should only be replayed on matching documents - assert docIt.docID() == doc; + assert scoreIt.docID() == doc; } leafCollector.collect(doc, rebasedBucket); } } } - collector.postCollection(); }
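Worth pausing on what entry replay actually does before the subclass below shrinks: each per-segment Entry stores delta-encoded doc ids alongside the bucket ordinal each doc was collected into, and replay walks both packed streams in lockstep, forwarding only docs whose ordinal survives the selected-bucket lookup. A self-contained sketch of that walk (illustrative; the rebasedOrdinals array stands in for the LongHash used by the real code, and scoring is omitted):

import org.apache.lucene.util.packed.PackedLongValues;

// Illustrative decoding walk over one recorded segment: doc ids are stored as
// deltas so they pack well, and each doc carries its collected bucket ordinal.
final class ReplaySketch {
    static void replay(PackedLongValues docDeltas, PackedLongValues buckets, long[] rebasedOrdinals) {
        PackedLongValues.Iterator deltaIt = docDeltas.iterator();
        PackedLongValues.Iterator bucketIt = buckets.iterator();
        int doc = 0;
        for (long i = 0, end = docDeltas.size(); i < end; ++i) {
            doc += deltaIt.next();            // doc ids are delta-encoded, rebuild the absolute id
            long bucket = bucketIt.next();
            long rebased = rebasedOrdinals[Math.toIntExact(bucket)]; // -1 when the bucket was not selected
            if (rebased != -1) {
                // the real code forwards to leafCollector.collect(doc, rebased) here,
                // advancing the scorer's iterator to this doc first when scores are needed
            }
        }
    }
}

diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java index 53049d0301c2d..b293cc53a3629 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java @@ -19,98 +19,25 @@ package org.elasticsearch.search.aggregations.bucket; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.DocIdSetIterator; -import org.apache.lucene.search.ScoreMode; -import org.apache.lucene.search.Scorer; -import org.apache.lucene.search.Weight; import org.apache.lucene.util.packed.PackedInts; import org.apache.lucene.util.packed.PackedLongValues; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.util.LongHash; -import org.elasticsearch.search.aggregations.Aggregator; -import org.elasticsearch.search.aggregations.BucketCollector; -import org.elasticsearch.search.aggregations.InternalAggregation; -import org.elasticsearch.search.aggregations.LeafBucketCollector; -import org.elasticsearch.search.aggregations.MultiBucketCollector; import org.elasticsearch.search.internal.SearchContext; -import java.io.IOException; import java.util.ArrayList; import java.util.List; /** - * A specialization of {@link DeferringBucketCollector} that collects all + * A specialization of {@link BestBucketsDeferringCollector} that collects all * matches and then is able to replay a given subset of buckets. Exposes * mergeBuckets, which can be invoked by the aggregator when increasing the * rounding interval.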
*/ -public class MergingBucketsDeferringCollector extends DeferringBucketCollector { - - List entries = new ArrayList<>(); - BucketCollector collector; - final SearchContext searchContext; - LeafReaderContext context; - PackedLongValues.Builder docDeltas; - PackedLongValues.Builder buckets; - long maxBucket = -1; - boolean finished = false; - LongHash selectedBuckets; - - public MergingBucketsDeferringCollector(SearchContext context) { - this.searchContext = context; - } - - @Override - public void setDeferredCollector(Iterable deferredCollectors) { - this.collector = MultiBucketCollector.wrap(deferredCollectors); - } - - @Override - public ScoreMode scoreMode() { - if (collector == null) { - throw new IllegalStateException(); - } - return collector.scoreMode(); - } - - @Override - public void preCollection() throws IOException { - collector.preCollection(); - } - - private void finishLeaf() { - if (context != null) { - entries.add(new Entry(context, docDeltas.build(), buckets.build())); - } - context = null; - docDeltas = null; - buckets = null; - } - - @Override - public LeafBucketCollector getLeafCollector(LeafReaderContext ctx) throws IOException { - finishLeaf(); - - context = ctx; - docDeltas = PackedLongValues.packedBuilder(PackedInts.DEFAULT); - buckets = PackedLongValues.packedBuilder(PackedInts.DEFAULT); - - return new LeafBucketCollector() { - int lastDoc = 0; - - @Override - public void collect(int doc, long bucket) { - docDeltas.add(doc - lastDoc); - buckets.add(bucket); - lastDoc = doc; - maxBucket = Math.max(maxBucket, bucket); - } - }; +public class MergingBucketsDeferringCollector extends BestBucketsDeferringCollector { + public MergingBucketsDeferringCollector(SearchContext context, boolean isGlobal) { + super(context, isGlobal); } public void mergeBuckets(long[] mergeMap) { - List newEntries = new ArrayList<>(entries.size()); for (Entry sourceEntry : entries) { PackedLongValues.Builder newBuckets = PackedLongValues.packedBuilder(PackedInts.DEFAULT); @@ -124,117 +51,14 @@ public void mergeBuckets(long[] mergeMap) { // if there are buckets that have been collected in the current segment // we need to update the bucket ordinals there too - if (buckets.size() > 0) { - PackedLongValues currentBuckets = buckets.build(); + if (bucketsBuilder.size() > 0) { + PackedLongValues currentBuckets = bucketsBuilder.build(); PackedLongValues.Builder newBuckets = PackedLongValues.packedBuilder(PackedInts.DEFAULT); for (PackedLongValues.Iterator itr = currentBuckets.iterator(); itr.hasNext();) { long bucket = itr.next(); newBuckets.add(mergeMap[Math.toIntExact(bucket)]); } - buckets = newBuckets; - } - } - - @Override - public void postCollection() { - finishLeaf(); - finished = true; - } - - /** - * Replay the wrapped collector, but only on a selection of buckets. - */ - @Override - public void prepareSelectedBuckets(long... 
selectedBuckets) throws IOException { - if (finished == false) { - throw new IllegalStateException("Cannot replay yet, collection is not finished: postCollect() has not been called"); - } - if (this.selectedBuckets != null) { - throw new IllegalStateException("Already been replayed"); - } - - final LongHash hash = new LongHash(selectedBuckets.length, BigArrays.NON_RECYCLING_INSTANCE); - for (long bucket : selectedBuckets) { - hash.add(bucket); - } - this.selectedBuckets = hash; - - boolean needsScores = collector.scoreMode().needsScores(); - Weight weight = null; - if (needsScores) { - weight = searchContext.searcher().createWeight( - searchContext.searcher().rewrite(searchContext.query()), - ScoreMode.COMPLETE, 1f); - } - for (Entry entry : entries) { - final LeafBucketCollector leafCollector = collector.getLeafCollector(entry.context); - DocIdSetIterator docIt = null; - if (needsScores && entry.docDeltas.size() > 0) { - Scorer scorer = weight.scorer(entry.context); - // We don't need to check if the scorer is null - // since we are sure that there are documents to replay - // (entry.docDeltas it not empty). - docIt = scorer.iterator(); - leafCollector.setScorer(scorer); - } - final PackedLongValues.Iterator docDeltaIterator = entry.docDeltas.iterator(); - final PackedLongValues.Iterator buckets = entry.buckets.iterator(); - int doc = 0; - for (long i = 0, end = entry.docDeltas.size(); i < end; ++i) { - doc += docDeltaIterator.next(); - final long bucket = buckets.next(); - final long rebasedBucket = hash.find(bucket); - if (rebasedBucket != -1) { - if (needsScores) { - if (docIt.docID() < doc) { - docIt.advance(doc); - } - // aggregations should only be replayed on matching - // documents - assert docIt.docID() == doc; - } - leafCollector.collect(doc, rebasedBucket); - } - } - } - - collector.postCollection(); - } - - /** - * Wrap the provided aggregator so that it behaves (almost) as if it had - * been collected directly. 
- */ - @Override - public Aggregator wrap(final Aggregator in) { - - return new WrappedAggregator(in) { - - @Override - public InternalAggregation buildAggregation(long bucket) throws IOException { - if (selectedBuckets == null) { - throw new IllegalStateException("Collection has not been replayed yet."); - } - final long rebasedBucket = selectedBuckets.find(bucket); - if (rebasedBucket == -1) { - throw new IllegalStateException("Cannot build for a bucket which has not been collected [" + bucket + "]"); - } - return in.buildAggregation(rebasedBucket); - } - - }; - } - - private static class Entry { - final LeafReaderContext context; - final PackedLongValues docDeltas; - final PackedLongValues buckets; - - Entry(LeafReaderContext context, PackedLongValues docDeltas, PackedLongValues buckets) { - this.context = context; - this.docDeltas = docDeltas; - this.buckets = buckets; + bucketsBuilder = newBuckets; } } - } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java index 1b982ea9deca2..b10507cd2ce65 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java @@ -93,7 +93,7 @@ protected boolean shouldDefer(Aggregator aggregator) { @Override public DeferringBucketCollector getDeferringCollector() { - deferringCollector = new MergingBucketsDeferringCollector(context); + deferringCollector = new MergingBucketsDeferringCollector(context, descendsFromGlobalAggregator(parent())); return deferringCollector; } diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index 4f690d12acf1e..db4e0f021e05b 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -183,7 +183,8 @@ private synchronized void updateRemoteClusters(Map<String, Tuple<String, List<Tuple<String, Supplier<DiscoveryNode>>>>> + private static boolean seedsChanged(final List<Tuple<String, Supplier<DiscoveryNode>>> oldSeedNodes, + final List<Tuple<String, Supplier<DiscoveryNode>>> newSeedNodes) { + if (oldSeedNodes.size() != newSeedNodes.size()) { + return true; + } + Set<String> oldSeeds = oldSeedNodes.stream().map(Tuple::v1).collect(Collectors.toSet()); + Set<String> newSeeds = newSeedNodes.stream().map(Tuple::v1).collect(Collectors.toSet()); + return oldSeeds.equals(newSeeds) == false; + } + /** * Collects all nodes of the given clusters and returns / passes a (clusterAlias, nodeId) to {@link DiscoveryNode} * function on success.
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java index d7974ed1c6365..6913b518d2464 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java @@ -136,9 +136,13 @@ public static void afterClass() { } private void executeOnPrimaryOrReplica() throws Throwable { + executeOnPrimaryOrReplica(false); + } + + private void executeOnPrimaryOrReplica(boolean phase1) throws Throwable { final TaskId taskId = new TaskId("_node_id", randomNonNegativeLong()); final TransportVerifyShardBeforeCloseAction.ShardRequest request = - new TransportVerifyShardBeforeCloseAction.ShardRequest(indexShard.shardId(), clusterBlock, taskId); + new TransportVerifyShardBeforeCloseAction.ShardRequest(indexShard.shardId(), clusterBlock, phase1, taskId); final PlainActionFuture res = PlainActionFuture.newFuture(); action.shardOperationOnPrimary(request, indexShard, ActionListener.wrap( r -> { @@ -165,6 +169,11 @@ public void testShardIsFlushed() throws Throwable { assertThat(flushRequest.getValue().force(), is(true)); } + public void testShardIsSynced() throws Throwable { + executeOnPrimaryOrReplica(true); + verify(indexShard, times(1)).sync(); + } + public void testOperationFailsWhenNotBlocked() { when(indexShard.getActiveOperationsCount()).thenReturn(randomIntBetween(0, 10)); @@ -227,7 +236,7 @@ public void testUnavailableShardsMarkedAsStale() throws Exception { final PlainActionFuture listener = new PlainActionFuture<>(); TaskId taskId = new TaskId(clusterService.localNode().getId(), 0L); TransportVerifyShardBeforeCloseAction.ShardRequest request = - new TransportVerifyShardBeforeCloseAction.ShardRequest(shardId, clusterBlock, taskId); + new TransportVerifyShardBeforeCloseAction.ShardRequest(shardId, clusterBlock, false, taskId); ReplicationOperation.Replicas proxy = action.newReplicasProxy(); ReplicationOperation operation = new ReplicationOperation<>( @@ -303,6 +312,11 @@ public long localCheckpoint() { return 0; } + @Override + public long computedGlobalCheckpoint() { + return 0; + } + @Override public long globalCheckpoint() { return 0; diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java index b14bdd0ed9883..32cc6564e5ca0 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java @@ -62,7 +62,6 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.VersionUtils; -import org.elasticsearch.test.junit.annotations.TestLogging; import java.util.Arrays; import java.util.Map; @@ -81,7 +80,6 @@ protected boolean forbidPrivateIndexSettings() { return false; } - @TestLogging("org.elasticsearch.index.store:DEBUG") public void testCreateShrinkIndexToN() { assumeFalse("https://github.com/elastic/elasticsearch/issues/34080", Constants.WINDOWS); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java 
b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java index 058dcc7243029..ba6622ec2d142 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java @@ -51,6 +51,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.shard.DocsStats; +import org.elasticsearch.rest.action.cat.RestIndicesActionTests; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -165,6 +166,35 @@ public void testEvaluateWithoutDocStats() { } } + public void testEvaluateWithoutMetaData() { + MaxDocsCondition maxDocsCondition = new MaxDocsCondition(100L); + MaxAgeCondition maxAgeCondition = new MaxAgeCondition(TimeValue.timeValueHours(2)); + MaxSizeCondition maxSizeCondition = new MaxSizeCondition(new ByteSizeValue(randomIntBetween(10, 100), ByteSizeUnit.MB)); + + long matchMaxDocs = randomIntBetween(100, 1000); + final Set<Condition<?>> conditions = Sets.newHashSet(maxDocsCondition, maxAgeCondition, maxSizeCondition); + Map<String, Boolean> results = evaluateConditions(conditions, + new DocsStats(matchMaxDocs, 0L, ByteSizeUnit.MB.toBytes(120)), null); + assertThat(results.size(), equalTo(3)); + results.forEach((k, v) -> assertFalse(v)); + + final Settings settings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 1000)) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomInt(10)) + .build() ; + + final IndexMetaData metaData = IndexMetaData.builder(randomAlphaOfLength(10)) + .creationDate(System.currentTimeMillis() - TimeValue.timeValueHours(randomIntBetween(5, 10)).getMillis()) + .settings(settings) + .build(); + IndicesStatsResponse indicesStats = RestIndicesActionTests.randomIndicesStatsResponse(new IndexMetaData[]{metaData}); + Map<String, Boolean> results2 = evaluateConditions(conditions, null, indicesStats); + assertThat(results2.size(), equalTo(3)); + results2.forEach((k, v) -> assertFalse(v)); + } + public void testCreateUpdateAliasRequest() { String sourceAlias = randomAlphaOfLength(10); String sourceIndex = randomAlphaOfLength(10); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java index 70d45d1db3ac6..80694a0a8b437 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java @@ -38,7 +38,6 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.store.MockFSIndexStore; import java.util.Arrays; @@ -59,8 +58,6 @@ import static org.hamcrest.Matchers.nullValue; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) -@TestLogging("_root:DEBUG,org.elasticsearch.action.admin.indices.shards:TRACE,org.elasticsearch.cluster.service:TRACE," + - "org.elasticsearch.gateway.TransportNodesListGatewayStartedShards:TRACE") public
class IndicesShardStoreRequestIT extends ESIntegTestCase { @Override diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkRejectionIT.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkRejectionIT.java index 900f50a9be005..9104ae78810ae 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/BulkRejectionIT.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkRejectionIT.java @@ -23,8 +23,13 @@ import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; +import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -41,6 +46,18 @@ protected Settings nodeSettings(int nodeOrdinal) { .build(); } + @Override + protected Collection> nodePlugins() { + return Arrays.asList(InternalSettingsPlugin.class); + } + + @Override + public Settings indexSettings() { + return Settings.builder().put(super.indexSettings()) + // sync global checkpoint quickly so we can verify seq_no_stats aligned between all copies after tests. + .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "1s").build(); + } + @Override protected int numberOfReplicas() { return 1; diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java index c959e3ed45d1a..9f86d190a644a 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java @@ -137,6 +137,7 @@ public void testReplication() throws Exception { assertThat(primary.knownLocalCheckpoints.remove(primaryShard.allocationId().getId()), equalTo(primary.localCheckpoint)); assertThat(primary.knownLocalCheckpoints, equalTo(replicasProxy.generatedLocalCheckpoints)); + assertThat(primary.knownGlobalCheckpoints.remove(primaryShard.allocationId().getId()), equalTo(primary.globalCheckpoint)); assertThat(primary.knownGlobalCheckpoints, equalTo(replicasProxy.generatedGlobalCheckpoints)); } @@ -533,6 +534,11 @@ public long globalCheckpoint() { return globalCheckpoint; } + @Override + public long computedGlobalCheckpoint() { + return globalCheckpoint; + } + @Override public long maxSeqNoOfUpdatesOrDeletes() { return maxSeqNoOfUpdatesOrDeletes; diff --git a/server/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java b/server/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java index 164c74423aa7f..a78bd4a67ece4 100644 --- a/server/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java @@ -41,7 +41,6 @@ import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.disruption.NetworkDisruption.TwoPartitions; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; import java.util.ArrayList; @@ -63,7 +62,6 @@ import static org.hamcrest.Matchers.nullValue; 
@ClusterScope(scope = Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) -@TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE,org.elasticsearch.cluster.coordination:TRACE") public class MinimumMasterNodesIT extends ESIntegTestCase { @Override diff --git a/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java b/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java index abfa0f692239c..27032b1a50c61 100644 --- a/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java @@ -51,7 +51,6 @@ import org.elasticsearch.script.ScriptService; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.hamcrest.CollectionAssertions; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.watcher.ResourceWatcherService; import org.junit.Before; @@ -254,7 +253,6 @@ public void testLargeClusterStatePublishing() throws Exception { } } - @TestLogging("org.elasticsearch.action.admin.indices.close:DEBUG,org.elasticsearch.cluster.metadata:DEBUG") public void testIndicesOptions() throws Exception { ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().clear().setMetaData(true).setIndices("f*") .get(); diff --git a/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java b/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java index 7e4c1c5c3435e..cad4f51d7eac0 100644 --- a/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java @@ -31,7 +31,6 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.InternalTestCluster; -import org.elasticsearch.test.junit.annotations.TestLogging; import java.io.IOException; @@ -40,7 +39,6 @@ import static org.hamcrest.Matchers.nullValue; @ClusterScope(scope = Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) -@TestLogging("_root:DEBUG,org.elasticsearch.action.admin.cluster.state:TRACE") public class SpecificMasterNodesIT extends ESIntegTestCase { public void testSimpleOnlyMasterNodeElection() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinTests.java index 65d6af302da1b..4416912de0cc4 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinTests.java @@ -40,7 +40,6 @@ import org.elasticsearch.node.Node; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.CapturingTransport; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -76,7 +75,6 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -@TestLogging("org.elasticsearch.cluster.service:TRACE,org.elasticsearch.cluster.coordination:TRACE") public class NodeJoinTests extends ESTestCase { private static ThreadPool threadPool; diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java 
b/server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java index 62491724b9221..9658900f4ebe9 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java @@ -50,7 +50,6 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.disruption.BlockClusterStateProcessing; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.transport.TransportSettings; import java.util.List; @@ -68,7 +67,6 @@ import static org.hamcrest.Matchers.instanceOf; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0) -@TestLogging("_root:DEBUG") public class RareClusterStateIT extends ESIntegTestCase { @Override diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java b/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java index ce453ddbf963b..6f6dde9f4f3f7 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java @@ -36,7 +36,6 @@ import org.elasticsearch.node.Node; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; -import org.elasticsearch.test.junit.annotations.TestLogging; import java.io.IOException; import java.nio.file.Path; @@ -53,7 +52,6 @@ import static org.hamcrest.Matchers.greaterThan; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) -@TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE,org.elasticsearch.cluster.coordination:TRACE") public class UnsafeBootstrapAndDetachCommandIT extends ESIntegTestCase { private MockTerminal executeCommand(ElasticsearchNodeCommand command, Environment environment, boolean abort) diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java b/server/src/test/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java index feffbfc792656..61bf1fdcc7896 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java @@ -38,7 +38,6 @@ import org.elasticsearch.node.Node; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.TestCustomMetaData; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.transport.RemoteTransportException; import java.util.EnumSet; @@ -55,7 +54,6 @@ import static org.hamcrest.Matchers.notNullValue; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0) -@TestLogging("_root:DEBUG") public class ZenDiscoveryIT extends ESIntegTestCase { public void testNoShardRelocationsOccurWhenElectedMasterNodeFails() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java index 0e6b24c45d169..5ffa7dbfe444c 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java @@ -53,7 +53,6 @@ 
import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.disruption.NetworkDisruption.NetworkDisconnect; import org.elasticsearch.test.disruption.NetworkDisruption.TwoPartitions; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; import java.util.ArrayList; @@ -491,8 +490,6 @@ public void testForceAllocatePrimaryOnNoDecision() throws Exception { /** * This test asserts that replicas that failed to execute resync operations will be failed but not marked as stale. */ - @TestLogging("_root:DEBUG, org.elasticsearch.cluster.routing.allocation:TRACE, org.elasticsearch.cluster.action.shard:TRACE," + - "org.elasticsearch.indices.recovery:TRACE, org.elasticsearch.cluster.routing.allocation.allocator:TRACE") public void testPrimaryReplicaResyncFailed() throws Exception { String master = internalCluster().startMasterOnlyNode(Settings.EMPTY); final int numberOfReplicas = between(2, 3); diff --git a/server/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java b/server/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java index 6b33b7eb3e2a8..0e732d5fb0502 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java @@ -30,7 +30,6 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.test.junit.annotations.TestLogging; import java.util.Arrays; import java.util.HashSet; @@ -330,7 +329,6 @@ public void onFailure(String source, Exception e) { assertThat(processedLatch.await(1, TimeUnit.SECONDS), equalTo(true)); } - @TestLogging("_root:debug,org.elasticsearch.action.admin.cluster.tasks:trace") public void testPendingUpdateTask() throws Exception { String node_0 = internalCluster().startNode(); internalCluster().startCoordinatingOnlyNode(Settings.EMPTY); diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutorTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutorTests.java index 6b5f7d95700d1..7ffe6a4090a29 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutorTests.java @@ -19,12 +19,12 @@ package org.elasticsearch.common.util.concurrent; -import java.util.concurrent.TimeUnit; -import java.util.function.Function; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.junit.annotations.TestLogging; + +import java.util.concurrent.TimeUnit; +import java.util.function.Function; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -154,7 +154,6 @@ public void testAutoQueueSizingWithMin() throws Exception { context.close(); } - @TestLogging("org.elasticsearch.common.util.concurrent:DEBUG") public void testAutoQueueSizingWithMax() throws Exception { ThreadContext context = new ThreadContext(Settings.EMPTY); ResizableBlockingQueue<Runnable> queue = diff --git a/server/src/test/java/org/elasticsearch/discovery/DiskDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/DiskDisruptionIT.java new file mode 100644
index 0000000000000..714ae7e5688a0 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/discovery/DiskDisruptionIT.java @@ -0,0 +1,180 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.discovery; + +import com.carrotsearch.randomizedtesting.RandomizedTest; +import org.apache.lucene.mockfile.FilterFileSystemProvider; +import org.elasticsearch.action.admin.indices.stats.ShardStats; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.io.PathUtilsForTesting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.seqno.SequenceNumbers; +import org.elasticsearch.test.BackgroundIndexer; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.test.junit.annotations.TestLogging; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.nio.channels.FileChannel; +import java.nio.file.FileSystem; +import java.nio.file.OpenOption; +import java.nio.file.Path; +import java.nio.file.attribute.FileAttribute; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; + +@TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE") +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) +public class DiskDisruptionIT extends AbstractDisruptionTestCase { + + private static DisruptTranslogFileSystemProvider disruptTranslogFileSystemProvider; + + @BeforeClass + public static void installDisruptTranslogFS() { + FileSystem current = PathUtils.getDefaultFileSystem(); + disruptTranslogFileSystemProvider = new DisruptTranslogFileSystemProvider(current); + PathUtilsForTesting.installMock(disruptTranslogFileSystemProvider.getFileSystem(null)); + } + + @AfterClass + public static void removeDisruptTranslogFS() { + PathUtilsForTesting.teardown(); + } + + void injectTranslogFailures() { + disruptTranslogFileSystemProvider.injectFailures.set(true); + } + + @After + void stopTranslogFailures() { + disruptTranslogFileSystemProvider.injectFailures.set(false); + } + + static class DisruptTranslogFileSystemProvider extends FilterFileSystemProvider { + + AtomicBoolean injectFailures = new AtomicBoolean(); + + DisruptTranslogFileSystemProvider(FileSystem inner) { + super("disrupttranslog://", inner); + } + + @Override + public FileChannel newFileChannel(Path path, Set options, FileAttribute... 
attrs) throws IOException { + if (injectFailures.get() && path.toString().endsWith(".ckp")) { + // prevents the checkpoint file from being updated + throw new IOException("fake IOException"); + } + return super.newFileChannel(path, options, attrs); + } + + } + + /** + * This test checks that all operations below the global checkpoint are properly persisted. + * It simulates a full power outage by preventing translog checkpoint files from being written and restarting the cluster. This + * means that all un-fsynced data will be lost. + */ + public void testGlobalCheckpointIsSafe() throws Exception { + startCluster(rarely() ? 5 : 3); + + final int numberOfShards = 1 + randomInt(2); + assertAcked(prepareCreate("test") + .setSettings(Settings.builder() + .put(indexSettings()) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numberOfShards) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomInt(2)) + )); + ensureGreen(); + + AtomicBoolean stopGlobalCheckpointFetcher = new AtomicBoolean(); + + Map<Integer, Long> shardToGcp = new ConcurrentHashMap<>(); + for (int i = 0; i < numberOfShards; i++) { + shardToGcp.put(i, SequenceNumbers.NO_OPS_PERFORMED); + } + final Thread globalCheckpointSampler = new Thread(() -> { + while (stopGlobalCheckpointFetcher.get() == false) { + try { + for (ShardStats shardStats : client().admin().indices().prepareStats("test").clear().get().getShards()) { + final int shardId = shardStats.getShardRouting().id(); + final long globalCheckpoint = shardStats.getSeqNoStats().getGlobalCheckpoint(); + shardToGcp.compute(shardId, (i, v) -> Math.max(v, globalCheckpoint)); + } + } catch (Exception e) { + // ignore + logger.debug("failed to fetch shard stats", e); + } + } + }); + + globalCheckpointSampler.start(); + + try (BackgroundIndexer indexer = new BackgroundIndexer("test", "_doc", client(), -1, RandomizedTest.scaledRandomIntBetween(2, 5), + false, random())) { + indexer.setRequestTimeout(TimeValue.ZERO); + indexer.setIgnoreIndexingFailures(true); + indexer.setAssertNoFailuresOnStop(false); + indexer.start(-1); + + waitForDocs(randomIntBetween(1, 100), indexer); + + logger.info("injecting failures"); + injectTranslogFailures(); + logger.info("stopping indexing"); + } + + logger.info("full cluster restart"); + internalCluster().fullRestart(new InternalTestCluster.RestartCallback() { + + @Override + public void onAllNodesStopped() { + logger.info("stopping failures"); + stopTranslogFailures(); + } + + }); + + stopGlobalCheckpointFetcher.set(true); + + logger.info("waiting for global checkpoint sampler"); + globalCheckpointSampler.join(); + + logger.info("waiting for green"); + ensureGreen("test"); + + for (ShardStats shardStats : client().admin().indices().prepareStats("test").clear().get().getShards()) { + final int shardId = shardStats.getShardRouting().id(); + final long maxSeqNo = shardStats.getSeqNoStats().getMaxSeqNo(); + if (shardStats.getShardRouting().active()) { + assertThat(maxSeqNo, greaterThanOrEqualTo(shardToGcp.get(shardId))); + } + } + } + +} diff --git a/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java index 37d8efd72c72f..d223e681f67c6 100644 --- a/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java @@ -94,11 +94,6 @@ public void testMasterNodeGCs() throws Exception { * This test isolates the master from the rest of the cluster, waits for a new master to be elected, restores the partition * and verifies that all nodes agree on the new cluster state */ - @TestLogging( - "_root:DEBUG," - + "org.elasticsearch.cluster.service:TRACE," - + "org.elasticsearch.gateway:TRACE," - + "org.elasticsearch.indices.store:TRACE") public void testIsolateMasterAndVerifyClusterStateConsensus() throws Exception { final List<String> nodes = startCluster(3); @@ -244,16 +239,6 @@ public void testVerifyApiBlocksDuringPartition() throws Exception { } - @TestLogging( - "_root:DEBUG," - + "org.elasticsearch.action.bulk:TRACE," - + "org.elasticsearch.action.get:TRACE," - + "org.elasticsearch.cluster.service:TRACE," - + "org.elasticsearch.discovery:TRACE," - + "org.elasticsearch.indices.cluster:TRACE," - + "org.elasticsearch.indices.recovery:TRACE," - + "org.elasticsearch.index.seqno:TRACE," - + "org.elasticsearch.index.shard:TRACE") public void testMappingTimeout() throws Exception { startCluster(3); createIndex("test", Settings.builder() diff --git a/server/src/test/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java index 371b811c82d94..ceab9a4cb2d83 100644 --- a/server/src/test/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java @@ -37,7 +37,6 @@ import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.disruption.NetworkDisruption; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; import java.util.Arrays; @@ -54,7 +53,6 @@ /** * Tests snapshot operations during disruptions. */ -@TestLogging("org.elasticsearch.snapshot:TRACE") @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public class SnapshotDisruptionIT extends ESIntegTestCase { diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineMergeIT.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineMergeIT.java index 1bf928fcb8060..a072ca880e01e 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineMergeIT.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineMergeIT.java @@ -27,7 +27,6 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.hamcrest.Matchers; import java.io.IOException; @@ -40,7 +39,6 @@ @ClusterScope(supportsDedicatedMasters = false, numDataNodes = 1, scope = Scope.SUITE) public class InternalEngineMergeIT extends ESIntegTestCase { - @TestLogging("_root:DEBUG") public void testMergesHappening() throws InterruptedException, IOException, ExecutionException { final int numOfShards = randomIntBetween(1, 5); // some settings to keep num segments low diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index b0e4fb102ea42..59bbee9f1bbf5 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -21,7 +21,6 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.carrotsearch.randomizedtesting.generators.RandomNumbers; - import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger; @@ -678,7 +677,7 @@ public long getMaxSeqNo() { } @Override - public long getCheckpoint() { + public long getProcessedCheckpoint() { return localCheckpoint.get(); } } @@ -892,18 +891,18 @@ public void testRecoveryFromTranslogUpToSeqNo() throws IOException { } } maxSeqNo = engine.getLocalCheckpointTracker().getMaxSeqNo(); - globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpoint())); + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getProcessedLocalCheckpoint())); engine.syncTranslog(); } try (InternalEngine engine = new InternalEngine(config)) { engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); - assertThat(engine.getLocalCheckpoint(), equalTo(maxSeqNo)); + assertThat(engine.getProcessedLocalCheckpoint(), equalTo(maxSeqNo)); assertThat(engine.getLocalCheckpointTracker().getMaxSeqNo(), equalTo(maxSeqNo)); } try (InternalEngine engine = new InternalEngine(config)) { long upToSeqNo = randomLongBetween(globalCheckpoint.get(), maxSeqNo); engine.recoverFromTranslog(translogHandler, upToSeqNo); - assertThat(engine.getLocalCheckpoint(), equalTo(upToSeqNo)); + assertThat(engine.getProcessedLocalCheckpoint(), equalTo(upToSeqNo)); assertThat(engine.getLocalCheckpointTracker().getMaxSeqNo(), equalTo(upToSeqNo)); } } @@ -1173,7 +1172,8 @@ public void testCommitAdvancesMinTranslogForRecovery() throws IOException { engine.index(indexForDoc(doc)); boolean inSync = randomBoolean(); if (inSync) { - globalCheckpoint.set(engine.getLocalCheckpoint()); + engine.syncTranslog(); // to advance persisted local checkpoint + globalCheckpoint.set(engine.getPersistedLocalCheckpoint()); } engine.flush(); @@ -1191,7 +1191,7 @@ public void testCommitAdvancesMinTranslogForRecovery() throws IOException { assertThat(engine.getTranslog().getDeletionPolicy().getMinTranslogGenerationForRecovery(), equalTo(inSync ? 
4L : 1L)); assertThat(engine.getTranslog().getDeletionPolicy().getTranslogGenerationOfLastCommit(), equalTo(4L)); - globalCheckpoint.set(engine.getLocalCheckpoint()); + globalCheckpoint.set(engine.getPersistedLocalCheckpoint()); engine.flush(true, true); assertThat(engine.getTranslog().currentFileGeneration(), equalTo(5L)); assertThat(engine.getTranslog().getDeletionPolicy().getMinTranslogGenerationForRecovery(), equalTo(5L)); @@ -1615,7 +1615,7 @@ public void testForceMergeWithSoftDeletesRetention() throws Exception { } engine.flush(); - long localCheckpoint = engine.getLocalCheckpoint(); + long localCheckpoint = engine.getProcessedLocalCheckpoint(); globalCheckpoint.set(randomLongBetween(0, localCheckpoint)); engine.syncTranslog(); final long safeCommitCheckpoint; @@ -1703,7 +1703,7 @@ public void testForceMergeWithSoftDeletesRetentionAndRecoverySource() throws Exc } } engine.flush(); - globalCheckpoint.set(randomLongBetween(0, engine.getLocalCheckpoint())); + globalCheckpoint.set(randomLongBetween(0, engine.getPersistedLocalCheckpoint())); engine.syncTranslog(); final long minSeqNoToRetain; try (Engine.IndexCommitRef safeCommit = engine.acquireSafeIndexCommit()) { @@ -1715,7 +1715,7 @@ public void testForceMergeWithSoftDeletesRetentionAndRecoverySource() throws Exc assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, mapperService); Map ops = readAllOperationsInLucene(engine, mapperService) .stream().collect(Collectors.toMap(Translog.Operation::seqNo, Function.identity())); - for (long seqno = 0; seqno <= engine.getLocalCheckpoint(); seqno++) { + for (long seqno = 0; seqno <= engine.getPersistedLocalCheckpoint(); seqno++) { String msg = "seq# [" + seqno + "], global checkpoint [" + globalCheckpoint + "], retained-ops [" + retainedExtraOps + "]"; if (seqno < minSeqNoToRetain) { Translog.Operation op = ops.get(seqno); @@ -1737,14 +1737,14 @@ public void testForceMergeWithSoftDeletesRetentionAndRecoverySource() throws Exc // If the global checkpoint equals the local checkpoint, the next force-merge will be a noop // because all deleted documents are expunged in the previous force-merge already. We need to flush // a new segment to make merge happen so that we can verify that all _recovery_source are pruned.
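// The getProcessedLocalCheckpoint()/getPersistedLocalCheckpoint() split used throughout
// these hunks: "processed" advances once an operation has been applied in memory,
// "persisted" only once the translog has been fsynced, which is why the tests insert
// syncTranslog() before publishing a persisted checkpoint. A minimal, self-contained
// model of that idea (hypothetical class, not the real InternalEngine API; it ignores
// out-of-order completion — the gap handling lives in the tracker model further down):
import java.util.concurrent.atomic.AtomicLong;

final class TwoCheckpointModel {
    private final AtomicLong processed = new AtomicLong(-1L); // -1 == NO_OPS_PERFORMED
    private final AtomicLong persisted = new AtomicLong(-1L);

    void apply(long seqNo) { // operation applied in memory
        processed.accumulateAndGet(seqNo, Math::max);
    }

    void syncTranslog() { // fsync: durable progress catches up with in-memory progress
        persisted.accumulateAndGet(processed.get(), Math::max);
    }

    long getProcessedLocalCheckpoint() { return processed.get(); }
    long getPersistedLocalCheckpoint() { return persisted.get(); }

    public static void main(String[] args) {
        TwoCheckpointModel engine = new TwoCheckpointModel();
        engine.apply(0L);
        assert engine.getProcessedLocalCheckpoint() == 0L;
        assert engine.getPersistedLocalCheckpoint() == -1L; // nothing fsynced yet
        engine.syncTranslog();                              // the call the tests add
        assert engine.getPersistedLocalCheckpoint() == 0L;  // now safe to publish
    }
}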
- if (globalCheckpoint.get() == engine.getLocalCheckpoint() && liveDocs.isEmpty() == false) { + if (globalCheckpoint.get() == engine.getPersistedLocalCheckpoint() && liveDocs.isEmpty() == false) { String deleteId = randomFrom(liveDocs); engine.delete(new Engine.Delete("test", deleteId, newUid(deleteId), primaryTerm.get())); liveDocsWithSource.remove(deleteId); liveDocs.remove(deleteId); engine.flush(); } - globalCheckpoint.set(engine.getLocalCheckpoint()); + globalCheckpoint.set(engine.getPersistedLocalCheckpoint()); engine.syncTranslog(); engine.forceMerge(true, 1, false, false, false); assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, mapperService); @@ -2450,12 +2450,14 @@ public void testSeqNoAndCheckpoints() throws IOException { } } + initialEngine.syncTranslog(); // to advance persisted local checkpoint + if (randomInt(10) < 3) { // only update rarely as we do it every doc replicaLocalCheckpoint = randomIntBetween(Math.toIntExact(replicaLocalCheckpoint), Math.toIntExact(primarySeqNo)); } gcpTracker.updateLocalCheckpoint(primary.allocationId().getId(), - initialEngine.getLocalCheckpoint()); + initialEngine.getPersistedLocalCheckpoint()); gcpTracker.updateLocalCheckpoint(replica.allocationId().getId(), replicaLocalCheckpoint); if (rarely()) { @@ -2469,7 +2471,7 @@ public void testSeqNoAndCheckpoints() throws IOException { globalCheckpoint = gcpTracker.getGlobalCheckpoint(); assertEquals(primarySeqNo, initialEngine.getSeqNoStats(-1).getMaxSeqNo()); - assertEquals(primarySeqNo, initialEngine.getLocalCheckpoint()); + assertEquals(primarySeqNo, initialEngine.getPersistedLocalCheckpoint()); assertThat(globalCheckpoint, equalTo(replicaLocalCheckpoint)); assertThat( @@ -2503,7 +2505,8 @@ public void testSeqNoAndCheckpoints() throws IOException { // that the committed max seq no is equivalent to what the current primary seq no is, as all data // we have assigned sequence numbers to should be in the commit equalTo(primarySeqNo)); - assertThat(recoveringEngine.getLocalCheckpoint(), equalTo(primarySeqNo)); + assertThat(recoveringEngine.getProcessedLocalCheckpoint(), equalTo(primarySeqNo)); + assertThat(recoveringEngine.getPersistedLocalCheckpoint(), equalTo(primarySeqNo)); assertThat(recoveringEngine.getSeqNoStats(-1).getMaxSeqNo(), equalTo(primarySeqNo)); assertThat(generateNewSeqNo(recoveringEngine), equalTo(primarySeqNo + 1)); } @@ -2820,7 +2823,9 @@ public void testCurrentTranslogIDisCommitted() throws IOException { try (InternalEngine engine = createEngine(config)) { engine.index(firstIndexRequest); - globalCheckpoint.set(engine.getLocalCheckpoint()); + engine.syncTranslog(); // to advance persisted local checkpoint + assertEquals(engine.getProcessedLocalCheckpoint(), engine.getPersistedLocalCheckpoint()); + globalCheckpoint.set(engine.getPersistedLocalCheckpoint()); expectThrows(IllegalStateException.class, () -> engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE)); Map userData = engine.getLastCommittedSegmentInfos().getUserData(); assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); @@ -2982,7 +2987,9 @@ protected void commitIndexWriter(IndexWriter writer, Translog translog, String s final ParsedDocument doc1 = testParsedDocument("1", null, testDocumentWithTextField(), SOURCE, null); engine.index(indexForDoc(doc1)); - globalCheckpoint.set(engine.getLocalCheckpoint()); + engine.syncTranslog(); // to advance local checkpoint + assertEquals(engine.getProcessedLocalCheckpoint(), engine.getPersistedLocalCheckpoint()); + 
globalCheckpoint.set(engine.getPersistedLocalCheckpoint()); throwErrorOnCommit.set(true); FlushFailedEngineException e = expectThrows(FlushFailedEngineException.class, engine::flush); assertThat(e.getCause().getMessage(), equalTo("power's out")); @@ -3042,7 +3049,7 @@ private Path[] filterExtraFSFiles(Path[] files) { } public void testTranslogReplay() throws IOException { - final LongSupplier inSyncGlobalCheckpointSupplier = () -> this.engine.getLocalCheckpoint(); + final LongSupplier inSyncGlobalCheckpointSupplier = () -> this.engine.getProcessedLocalCheckpoint(); final int numDocs = randomIntBetween(1, 10); for (int i = 0; i < numDocs; i++) { ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null); @@ -3136,7 +3143,7 @@ public void testRecoverFromForeignTranslog() throws IOException { final String badUUID = Translog.createEmptyTranslog(badTranslogLog, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); Translog translog = new Translog( new TranslogConfig(shardId, badTranslogLog, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), - badUUID, createTranslogDeletionPolicy(INDEX_SETTINGS), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); + badUUID, createTranslogDeletionPolicy(INDEX_SETTINGS), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}); translog.add(new Translog.Index("test", "SomeBogusId", 0, primaryTerm.get(), "{}".getBytes(Charset.forName("UTF-8")))); assertEquals(generation.translogFileGeneration, translog.currentFileGeneration()); @@ -4178,9 +4185,10 @@ public void testSequenceNumberAdvancesToMaxSeqOnEngineOpenOnPrimary() throws Bro } } - assertThat(initialEngine.getLocalCheckpoint(), equalTo(expectedLocalCheckpoint.get())); + assertThat(initialEngine.getProcessedLocalCheckpoint(), equalTo(expectedLocalCheckpoint.get())); assertThat(initialEngine.getSeqNoStats(-1).getMaxSeqNo(), equalTo((long) (docs - 1))); initialEngine.flush(true, true); + assertEquals(initialEngine.getProcessedLocalCheckpoint(), initialEngine.getPersistedLocalCheckpoint()); latchReference.get().countDown(); for (final Thread thread : threads) { @@ -4189,10 +4197,11 @@ public void testSequenceNumberAdvancesToMaxSeqOnEngineOpenOnPrimary() throws Bro } finally { IOUtils.close(initialEngine); } - try (Engine recoveringEngine = new InternalEngine(initialEngine.config())) { + try (InternalEngine recoveringEngine = new InternalEngine(initialEngine.config())) { recoveringEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); recoveringEngine.fillSeqNoGaps(2); - assertThat(recoveringEngine.getLocalCheckpoint(), greaterThanOrEqualTo((long) (docs - 1))); + assertEquals(recoveringEngine.getProcessedLocalCheckpoint(), recoveringEngine.getPersistedLocalCheckpoint()); + assertThat(recoveringEngine.getProcessedLocalCheckpoint(), greaterThanOrEqualTo((long) (docs - 1))); } } @@ -4273,7 +4282,7 @@ public void testOutOfOrderSequenceNumbersWithVersionConflict() throws IOExceptio expectedLocalCheckpoint = numberOfOperations - 1; } - assertThat(engine.getLocalCheckpoint(), equalTo(expectedLocalCheckpoint)); + assertThat(engine.getProcessedLocalCheckpoint(), equalTo(expectedLocalCheckpoint)); try (Engine.GetResult result = engine.get(new Engine.Get(true, false, "type", "2", uid), searcherFactory)) { assertThat(result.exists(), equalTo(exists)); @@ -4305,12 +4314,12 @@ protected long doGenerateSeqNoForOperation(Operation operation) { final int gapsFilled = noOpEngine.fillSeqNoGaps(primaryTerm.get()); final String reason = "filling 
gaps"; noOpEngine.noOp(new Engine.NoOp(maxSeqNo + 1, primaryTerm.get(), LOCAL_TRANSLOG_RECOVERY, System.nanoTime(), reason)); - assertThat(noOpEngine.getLocalCheckpoint(), equalTo((long) (maxSeqNo + 1))); + assertThat(noOpEngine.getProcessedLocalCheckpoint(), equalTo((long) (maxSeqNo + 1))); assertThat(noOpEngine.getTranslog().stats().getUncommittedOperations(), equalTo(gapsFilled)); noOpEngine.noOp( new Engine.NoOp(maxSeqNo + 2, primaryTerm.get(), randomFrom(PRIMARY, REPLICA, PEER_RECOVERY), System.nanoTime(), reason)); - assertThat(noOpEngine.getLocalCheckpoint(), equalTo((long) (maxSeqNo + 2))); + assertThat(noOpEngine.getProcessedLocalCheckpoint(), equalTo((long) (maxSeqNo + 2))); assertThat(noOpEngine.getTranslog().stats().getUncommittedOperations(), equalTo(gapsFilled + 1)); // skip to the op that we added to the translog Translog.Operation op; @@ -4529,7 +4538,7 @@ public void testRestoreLocalHistoryFromTranslog() throws IOException { engine.flush(); } } - globalCheckpoint.set(randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, engine.getLocalCheckpoint())); + globalCheckpoint.set(randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, engine.getPersistedLocalCheckpoint())); engine.syncTranslog(); prevSeqNoStats = engine.getSeqNoStats(globalCheckpoint.get()); prevDocs = getDocIds(engine, true); @@ -4566,7 +4575,9 @@ public void testFillUpSequenceIdGapsOnRecovery() throws IOException { replicaEngine.index(replicaIndexForDoc(doc, 1, indexResult.getSeqNo(), false)); } } - checkpointOnReplica = replicaEngine.getLocalCheckpoint(); + engine.syncTranslog(); // to advance local checkpoint + replicaEngine.syncTranslog(); // to advance local checkpoint + checkpointOnReplica = replicaEngine.getProcessedLocalCheckpoint(); } finally { IOUtils.close(replicaEngine); } @@ -4574,17 +4585,17 @@ public void testFillUpSequenceIdGapsOnRecovery() throws IOException { boolean flushed = false; AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); - Engine recoveringEngine = null; + InternalEngine recoveringEngine = null; try { assertEquals(docs - 1, engine.getSeqNoStats(-1).getMaxSeqNo()); - assertEquals(docs - 1, engine.getLocalCheckpoint()); + assertEquals(docs - 1, engine.getProcessedLocalCheckpoint()); assertEquals(maxSeqIDOnReplica, replicaEngine.getSeqNoStats(-1).getMaxSeqNo()); - assertEquals(checkpointOnReplica, replicaEngine.getLocalCheckpoint()); + assertEquals(checkpointOnReplica, replicaEngine.getProcessedLocalCheckpoint()); recoveringEngine = new InternalEngine(copy(replicaEngine.config(), globalCheckpoint::get)); assertEquals(numDocsOnReplica, getTranslog(recoveringEngine).stats().getUncommittedOperations()); recoveringEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); assertEquals(maxSeqIDOnReplica, recoveringEngine.getSeqNoStats(-1).getMaxSeqNo()); - assertEquals(checkpointOnReplica, recoveringEngine.getLocalCheckpoint()); + assertEquals(checkpointOnReplica, recoveringEngine.getProcessedLocalCheckpoint()); assertEquals((maxSeqIDOnReplica + 1) - numDocsOnReplica, recoveringEngine.fillSeqNoGaps(2)); // now snapshot the tlog and ensure the primary term is updated @@ -4600,7 +4611,7 @@ public void testFillUpSequenceIdGapsOnRecovery() throws IOException { } assertEquals(maxSeqIDOnReplica, recoveringEngine.getSeqNoStats(-1).getMaxSeqNo()); - assertEquals(maxSeqIDOnReplica, recoveringEngine.getLocalCheckpoint()); + assertEquals(maxSeqIDOnReplica, recoveringEngine.getProcessedLocalCheckpoint()); if ((flushed = randomBoolean())) { 
globalCheckpoint.set(recoveringEngine.getSeqNoStats(-1).getMaxSeqNo()); getTranslog(recoveringEngine).sync(); @@ -4619,10 +4630,10 @@ public void testFillUpSequenceIdGapsOnRecovery() throws IOException { } recoveringEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); assertEquals(maxSeqIDOnReplica, recoveringEngine.getSeqNoStats(-1).getMaxSeqNo()); - assertEquals(maxSeqIDOnReplica, recoveringEngine.getLocalCheckpoint()); + assertEquals(maxSeqIDOnReplica, recoveringEngine.getProcessedLocalCheckpoint()); assertEquals(0, recoveringEngine.fillSeqNoGaps(3)); assertEquals(maxSeqIDOnReplica, recoveringEngine.getSeqNoStats(-1).getMaxSeqNo()); - assertEquals(maxSeqIDOnReplica, recoveringEngine.getLocalCheckpoint()); + assertEquals(maxSeqIDOnReplica, recoveringEngine.getProcessedLocalCheckpoint()); } finally { IOUtils.close(recoveringEngine); } @@ -4806,7 +4817,7 @@ protected void commitIndexWriter(IndexWriter writer, Translog translog, String s // Advance the global checkpoint during the flush to create a lag between a persisted global checkpoint in the translog // (this value is visible to the deletion policy) and an in memory global checkpoint in the SequenceNumbersService. if (rarely()) { - globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), getLocalCheckpoint())); + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), getPersistedLocalCheckpoint())); } super.commitIndexWriter(writer, translog, syncId); } @@ -4818,7 +4829,7 @@ protected void commitIndexWriter(IndexWriter writer, Translog translog, String s document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_1), SourceFieldMapper.Defaults.FIELD_TYPE)); engine.index(indexForDoc(testParsedDocument(Integer.toString(docId), null, document, B_1, null))); if (frequently()) { - globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpoint())); + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getPersistedLocalCheckpoint())); engine.syncTranslog(); } if (frequently()) { @@ -4958,11 +4969,11 @@ public void testCleanUpCommitsWhenGlobalCheckpointAdvanced() throws Exception { engine.flush(false, randomBoolean()); List commits = DirectoryReader.listCommits(store.directory()); // Global checkpoint advanced but not enough - all commits are kept. - globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpoint() - 1)); + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getPersistedLocalCheckpoint() - 1)); engine.syncTranslog(); assertThat(DirectoryReader.listCommits(store.directory()), equalTo(commits)); // Global checkpoint advanced enough - only the last commit is kept. - globalCheckpoint.set(randomLongBetween(engine.getLocalCheckpoint(), Long.MAX_VALUE)); + globalCheckpoint.set(randomLongBetween(engine.getPersistedLocalCheckpoint(), Long.MAX_VALUE)); engine.syncTranslog(); assertThat(DirectoryReader.listCommits(store.directory()), contains(commits.get(commits.size() - 1))); assertThat(engine.getTranslog().totalOperations(), equalTo(0)); @@ -4987,7 +4998,7 @@ public void testCleanupCommitsWhenReleaseSnapshot() throws Exception { for (int i = 0; i < numSnapshots; i++) { snapshots.add(engine.acquireSafeIndexCommit()); // taking snapshots from the safe commit. 
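// The commit-cleanup assertions above ("all commits are kept" vs "only the last commit
// is kept") follow one rule: retain every commit from the newest safe commit — the
// newest commit whose max seq# is at or below the global checkpoint — onwards. A sketch
// under that assumption (hypothetical helper; the real logic lives in the engine's
// deletion policy and also honours snapshotted commits and sync/commit timing, which
// this omits):
import java.util.List;

final class CommitRetentionSketch {
    static List<Long> retainedCommits(List<Long> commitMaxSeqNos, long globalCheckpoint) {
        int safeIndex = 0; // the oldest commit is the fallback safe commit
        for (int i = 0; i < commitMaxSeqNos.size(); i++) {
            if (commitMaxSeqNos.get(i) <= globalCheckpoint) {
                safeIndex = i; // newest commit fully covered by the global checkpoint
            }
        }
        return commitMaxSeqNos.subList(safeIndex, commitMaxSeqNos.size());
    }

    public static void main(String[] args) {
        // commits with max seq# 9 and 19: a global checkpoint of 5 keeps both,
        // a global checkpoint of 19 keeps only the last — the shape asserted above
        assert retainedCommits(List.of(9L, 19L), 5L).size() == 2;
        assert retainedCommits(List.of(9L, 19L), 19L).size() == 1;
    }
}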
} - globalCheckpoint.set(engine.getLocalCheckpoint()); + globalCheckpoint.set(engine.getPersistedLocalCheckpoint()); engine.syncTranslog(); final List commits = DirectoryReader.listCommits(store.directory()); for (int i = 0; i < numSnapshots - 1; i++) { @@ -5068,7 +5079,7 @@ public void testStressShouldPeriodicallyFlush() throws Exception { engine.onSettingsChanged(); final int numOps = scaledRandomIntBetween(100, 10_000); for (int i = 0; i < numOps; i++) { - final long localCheckPoint = engine.getLocalCheckpoint(); + final long localCheckPoint = engine.getProcessedLocalCheckpoint(); final long seqno = randomLongBetween(Math.max(0, localCheckPoint), localCheckPoint + 5); final ParsedDocument doc = testParsedDocument(Long.toString(seqno), null, testDocumentWithTextField(), SOURCE, null); @@ -5251,8 +5262,8 @@ public void testTrackMaxSeqNoOfNonAppendOnlyOperations() throws Exception { } appendOnlyIndexer.join(120_000); assertThat(engine.getMaxSeqNoOfNonAppendOnlyOperations(), equalTo(maxSeqNoOfNonAppendOnly)); - globalCheckpoint.set(engine.getLocalCheckpoint()); engine.syncTranslog(); + globalCheckpoint.set(engine.getPersistedLocalCheckpoint()); engine.flush(); } try (InternalEngine engine = createEngine(store, translogPath, globalCheckpoint::get)) { @@ -5436,7 +5447,10 @@ public void testKeepMinRetainedSeqNoByMergePolicy() throws IOException { } existingSeqNos.add(result.getSeqNo()); if (randomBoolean()) { - globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpointTracker().getCheckpoint())); + engine.syncTranslog(); // advance persisted local checkpoint + assertEquals(engine.getProcessedLocalCheckpoint(), engine.getPersistedLocalCheckpoint()); + globalCheckpoint.set( + randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpointTracker().getPersistedCheckpoint())); } if (randomBoolean()) { retentionLeasesVersion.incrementAndGet(); @@ -5500,7 +5514,7 @@ public void testLastRefreshCheckpoint() throws Exception { latch.countDown(); refreshThreads[i] = new Thread(() -> { while (done.get() == false) { - long checkPointBeforeRefresh = engine.getLocalCheckpoint(); + long checkPointBeforeRefresh = engine.getProcessedLocalCheckpoint(); engine.refresh("test", randomFrom(Engine.SearcherScope.values()), true); assertThat(engine.lastRefreshedCheckpoint(), greaterThanOrEqualTo(checkPointBeforeRefresh)); } @@ -5516,7 +5530,7 @@ public void testLastRefreshCheckpoint() throws Exception { thread.join(); } engine.refresh("test"); - assertThat(engine.lastRefreshedCheckpoint(), equalTo(engine.getLocalCheckpoint())); + assertThat(engine.lastRefreshedCheckpoint(), equalTo(engine.getProcessedLocalCheckpoint())); } public void testLuceneSnapshotRefreshesOnlyOnce() throws Exception { @@ -5629,8 +5643,8 @@ public void testRebuildLocalCheckpointTrackerAndVersionMap() throws Exception { flushedOperations.add(op); applyOperation(engine, op); if (randomBoolean()) { - globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpoint())); engine.syncTranslog(); + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getPersistedLocalCheckpoint())); } if (randomInt(100) < 10) { engine.refresh("test"); @@ -5654,7 +5668,7 @@ public void testRebuildLocalCheckpointTrackerAndVersionMap() throws Exception { try (InternalEngine engine = new InternalEngine(config)) { // do not recover from translog final Map deletesAfterCheckpoint = new HashMap<>(); for (Engine.Operation op : operationsInSafeCommit) { - if (op instanceof Engine.NoOp == false && op.seqNo() 
> engine.getLocalCheckpoint()) { + if (op instanceof Engine.NoOp == false && op.seqNo() > engine.getPersistedLocalCheckpoint()) { deletesAfterCheckpoint.put(new Term(IdFieldMapper.NAME, Uid.encodeId(op.id())).bytes(), op); } } @@ -5675,8 +5689,8 @@ public void testRebuildLocalCheckpointTrackerAndVersionMap() throws Exception { final Set seqNosInSafeCommit = operationsInSafeCommit.stream().map(op -> op.seqNo()).collect(Collectors.toSet()); for (Engine.Operation op : operations) { assertThat( - "seq_no=" + op.seqNo() + " max_seq_no=" + tracker.getMaxSeqNo() + " checkpoint=" + tracker.getCheckpoint(), - tracker.contains(op.seqNo()), equalTo(seqNosInSafeCommit.contains(op.seqNo()))); + "seq_no=" + op.seqNo() + " max_seq_no=" + tracker.getMaxSeqNo() + " checkpoint=" + tracker.getProcessedCheckpoint(), + tracker.hasProcessed(op.seqNo()), equalTo(seqNosInSafeCommit.contains(op.seqNo()))); } engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); assertThat(getDocIds(engine, true), equalTo(docs)); @@ -5696,8 +5710,8 @@ public void testOpenSoftDeletesIndexWithSoftDeletesDisabled() throws Exception { config(softDeletesEnabled, store, translogPath, newMergePolicy(), null, null, globalCheckpoint::get))) { List ops = generateHistoryOnReplica(between(1, 100), randomBoolean(), randomBoolean(), randomBoolean()); applyOperations(engine, ops); - globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpoint())); - engine.syncTranslog(); + engine.syncTranslog(); // to advance persisted checkpoint + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getPersistedLocalCheckpoint())); engine.flush(); docs = getDocIds(engine, true); } @@ -5956,8 +5970,8 @@ public void testRecoverFromLocalTranslog() throws Exception { for (Engine.Operation op : operations) { applyOperation(engine, op); if (randomBoolean()) { - globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpoint())); engine.syncTranslog(); + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getPersistedLocalCheckpoint())); } if (randomInt(100) < 10) { engine.refresh("test"); diff --git a/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java b/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java index d1840c4d97cff..f6327e8132cea 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java @@ -161,7 +161,7 @@ public void testSkipStaleOrNonRootOfNestedDocuments() throws Exception { int totalOps = 0; for (Engine.Operation op : operations) { // Engine skips deletes or indexes below the local checkpoint - if (engine.getLocalCheckpoint() < op.seqNo() || op instanceof Engine.NoOp) { + if (engine.getProcessedLocalCheckpoint() < op.seqNo() || op instanceof Engine.NoOp) { seqNoToTerm.put(op.seqNo(), op.primaryTerm()); if (op instanceof Engine.Index) { totalOps += ((Engine.Index) op).docs().size(); @@ -228,7 +228,7 @@ public void testUpdateAndReadChangesConcurrently() throws Exception { readyLatch.countDown(); readyLatch.await(); concurrentlyApplyOps(operations, engine); - assertThat(engine.getLocalCheckpointTracker().getCheckpoint(), equalTo(operations.size() - 1L)); + assertThat(engine.getLocalCheckpointTracker().getProcessedCheckpoint(), equalTo(operations.size() - 1L)); isDone.set(true); for (Follower follower : followers) { follower.join(); @@ -237,13 +237,13 @@ public void
testUpdateAndReadChangesConcurrently() throws Exception { } class Follower extends Thread { - private final Engine leader; + private final InternalEngine leader; private final InternalEngine engine; private final TranslogHandler translogHandler; private final AtomicBoolean isDone; private final CountDownLatch readLatch; - Follower(Engine leader, AtomicBoolean isDone, CountDownLatch readLatch) throws IOException { + Follower(InternalEngine leader, AtomicBoolean isDone, CountDownLatch readLatch) throws IOException { this.leader = leader; this.isDone = isDone; this.readLatch = readLatch; @@ -252,9 +252,9 @@ class Follower extends Thread { this.engine = createEngine(createStore(), createTempDir()); } - void pullOperations(Engine follower) throws IOException { - long leaderCheckpoint = leader.getLocalCheckpoint(); - long followerCheckpoint = follower.getLocalCheckpoint(); + void pullOperations(InternalEngine follower) throws IOException { + long leaderCheckpoint = leader.getLocalCheckpointTracker().getProcessedCheckpoint(); + long followerCheckpoint = follower.getLocalCheckpointTracker().getProcessedCheckpoint(); if (followerCheckpoint < leaderCheckpoint) { long fromSeqNo = followerCheckpoint + 1; long batchSize = randomLongBetween(0, 100); @@ -271,7 +271,8 @@ public void run() { readLatch.countDown(); readLatch.await(); while (isDone.get() == false || - engine.getLocalCheckpointTracker().getCheckpoint() < leader.getLocalCheckpoint()) { + engine.getLocalCheckpointTracker().getProcessedCheckpoint() < + leader.getLocalCheckpointTracker().getProcessedCheckpoint()) { pullOperations(engine); } assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, mapperService); diff --git a/server/src/test/java/org/elasticsearch/index/engine/NoOpEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/NoOpEngineTests.java index de32e3e43077d..6f74ac23a8e85 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/NoOpEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/NoOpEngineTests.java @@ -85,12 +85,12 @@ public void testNoopAfterRegularEngine() throws IOException { flushAndTrimTranslog(engine); - long localCheckpoint = engine.getLocalCheckpoint(); + long localCheckpoint = engine.getPersistedLocalCheckpoint(); long maxSeqNo = engine.getSeqNoStats(100L).getMaxSeqNo(); engine.close(); final NoOpEngine noOpEngine = new NoOpEngine(noOpConfig(INDEX_SETTINGS, store, primaryTranslogDir, tracker)); - assertThat(noOpEngine.getLocalCheckpoint(), equalTo(localCheckpoint)); + assertThat(noOpEngine.getPersistedLocalCheckpoint(), equalTo(localCheckpoint)); assertThat(noOpEngine.getSeqNoStats(100L).getMaxSeqNo(), equalTo(maxSeqNo)); try (Engine.IndexCommitRef ref = noOpEngine.acquireLastIndexCommit(false)) { try (IndexReader reader = DirectoryReader.open(ref.getIndexCommit())) { @@ -114,7 +114,8 @@ public void testNoOpEngineStats() throws Exception { if (rarely()) { engine.flush(); } - globalCheckpoint.set(engine.getLocalCheckpoint()); + engine.syncTranslog(); // advance persisted local checkpoint + globalCheckpoint.set(engine.getPersistedLocalCheckpoint()); } for (int i = 0; i < numDocs; i++) { @@ -122,11 +123,12 @@ public void testNoOpEngineStats() throws Exception { String delId = Integer.toString(i); Engine.DeleteResult result = engine.delete(new Engine.Delete("test", delId, newUid(delId), primaryTerm.get())); assertTrue(result.isFound()); - globalCheckpoint.set(engine.getLocalCheckpoint()); + engine.syncTranslog(); // advance persisted local checkpoint + 
globalCheckpoint.set(engine.getPersistedLocalCheckpoint()); deletions += 1; } } - engine.getLocalCheckpointTracker().waitForOpsToComplete(numDocs + deletions - 1); + engine.getLocalCheckpointTracker().waitForProcessedOpsToComplete(numDocs + deletions - 1); flushAndTrimTranslog(engine); } diff --git a/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java index e0ad514e6dbb9..f01f4c5b8e3f9 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java @@ -62,15 +62,16 @@ public void testReadOnlyEngine() throws Exception { if (rarely()) { engine.flush(); } - globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpoint())); + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getPersistedLocalCheckpoint())); } engine.syncTranslog(); + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getPersistedLocalCheckpoint())); engine.flush(); readOnlyEngine = new ReadOnlyEngine(engine.engineConfig, engine.getSeqNoStats(globalCheckpoint.get()), engine.getTranslogStats(), false, Function.identity()); lastSeqNoStats = engine.getSeqNoStats(globalCheckpoint.get()); lastDocIds = getDocIds(engine, true); - assertThat(readOnlyEngine.getLocalCheckpoint(), equalTo(lastSeqNoStats.getLocalCheckpoint())); + assertThat(readOnlyEngine.getPersistedLocalCheckpoint(), equalTo(lastSeqNoStats.getLocalCheckpoint())); assertThat(readOnlyEngine.getSeqNoStats(globalCheckpoint.get()).getMaxSeqNo(), equalTo(lastSeqNoStats.getMaxSeqNo())); assertThat(getDocIds(readOnlyEngine, false), equalTo(lastDocIds)); for (int i = 0; i < numDocs; i++) { @@ -94,7 +95,7 @@ public void testReadOnlyEngine() throws Exception { IOUtils.close(external, internal); // the locked down engine should still point to the previous commit - assertThat(readOnlyEngine.getLocalCheckpoint(), equalTo(lastSeqNoStats.getLocalCheckpoint())); + assertThat(readOnlyEngine.getPersistedLocalCheckpoint(), equalTo(lastSeqNoStats.getLocalCheckpoint())); assertThat(readOnlyEngine.getSeqNoStats(globalCheckpoint.get()).getMaxSeqNo(), equalTo(lastSeqNoStats.getMaxSeqNo())); assertThat(getDocIds(readOnlyEngine, false), equalTo(lastDocIds)); try (Engine.GetResult getResult = readOnlyEngine.get(get, readOnlyEngine::acquireSearcher)) { @@ -105,7 +106,7 @@ public void testReadOnlyEngine() throws Exception { try (InternalEngine recoveringEngine = new InternalEngine(config)) { recoveringEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); // the locked down engine should still point to the previous commit - assertThat(readOnlyEngine.getLocalCheckpoint(), equalTo(lastSeqNoStats.getLocalCheckpoint())); + assertThat(readOnlyEngine.getPersistedLocalCheckpoint(), equalTo(lastSeqNoStats.getLocalCheckpoint())); assertThat(readOnlyEngine.getSeqNoStats(globalCheckpoint.get()).getMaxSeqNo(), equalTo(lastSeqNoStats.getMaxSeqNo())); assertThat(getDocIds(readOnlyEngine, false), equalTo(lastDocIds)); } @@ -129,9 +130,10 @@ public void testFlushes() throws IOException { if (rarely()) { engine.flush(); } - globalCheckpoint.set(engine.getLocalCheckpoint()); + engine.syncTranslog(); // advance persisted local checkpoint + globalCheckpoint.set(engine.getPersistedLocalCheckpoint()); } - globalCheckpoint.set(engine.getLocalCheckpoint()); + globalCheckpoint.set(engine.getPersistedLocalCheckpoint()); engine.syncTranslog(); 
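// Both read-only engine flavours above assume a quiescent shard: the translog is synced
// first so the persisted checkpoint catches up, and only then is the global checkpoint
// published from it. The open-time guard that testEnsureMaxSeqNoIsEqualToGlobalCheckpoint
// exercises just below can be sketched like this (hypothetical method and message; the
// real check sits in the read-only engine's constructor):
final class ReadOnlyOpenGuard {
    static void ensureSafeToOpen(long maxSeqNo, long globalCheckpoint) {
        if (globalCheckpoint != maxSeqNo) {
            throw new IllegalStateException("max seq# [" + maxSeqNo
                + "] is different from global checkpoint [" + globalCheckpoint + "]");
        }
    }

    public static void main(String[] args) {
        ensureSafeToOpen(7L, 7L);      // quiescent shard: fine
        try {
            ensureSafeToOpen(7L, 6L);  // one operation not globally acknowledged
            throw new AssertionError("expected rejection");
        } catch (IllegalStateException expected) {
            // same failure mode the test asserts with expectThrows
        }
    }
}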
engine.flushAndClose(); readOnlyEngine = new ReadOnlyEngine(engine.engineConfig, null , null, true, Function.identity()); @@ -155,10 +157,10 @@ public void testEnsureMaxSeqNoIsEqualToGlobalCheckpoint() throws IOException { ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null); engine.index(new Engine.Index(newUid(doc), doc, i, primaryTerm.get(), 1, null, Engine.Operation.Origin.REPLICA, System.nanoTime(), -1, false, SequenceNumbers.UNASSIGNED_SEQ_NO, 0)); - maxSeqNo = engine.getLocalCheckpoint(); + maxSeqNo = engine.getProcessedLocalCheckpoint(); } - globalCheckpoint.set(engine.getLocalCheckpoint() - 1); engine.syncTranslog(); + globalCheckpoint.set(engine.getPersistedLocalCheckpoint() - 1); engine.flushAndClose(); IllegalStateException exception = expectThrows(IllegalStateException.class, diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java b/server/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java index 21d84203f6df3..1539a800f2ae3 100644 --- a/server/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java +++ b/server/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java @@ -465,7 +465,7 @@ public void testGlobalOrdinals() throws Exception { assertThat(topLevelReader.leaves().size(), equalTo(3)); // First segment - assertThat(globalOrdinals, instanceOf(GlobalOrdinalsIndexFieldData.class)); + assertThat(globalOrdinals, instanceOf(GlobalOrdinalsIndexFieldData.Consumer.class)); LeafReaderContext leaf = topLevelReader.leaves().get(0); AtomicOrdinalsFieldData afd = globalOrdinals.load(leaf); SortedSetDocValues values = afd.getOrdinalsValues(); @@ -590,7 +590,7 @@ public void testGlobalOrdinalsGetRemovedOnceIndexReaderCloses() throws Exception IndexOrdinalsFieldData ifd = getForField("string", "value", hasDocValues()); IndexOrdinalsFieldData globalOrdinals = ifd.loadGlobal(topLevelReader); assertNotNull(globalOrdinals.getOrdinalMap()); - assertThat(ifd.loadGlobal(topLevelReader), sameInstance(globalOrdinals)); + assertThat(ifd.loadGlobal(topLevelReader).getOrdinalMap(), sameInstance(globalOrdinals.getOrdinalMap())); // 3 b/c 1 segment level caches and 1 top level cache // in case of doc values, we don't cache atomic FD, so only the top-level cache is there assertThat(indicesFieldDataCache.getCache().weight(), equalTo(hasDocValues() ? 1L : 4L)); @@ -602,7 +602,8 @@ public void testGlobalOrdinalsGetRemovedOnceIndexReaderCloses() throws Exception break; } } - assertThat(cachedInstance, sameInstance(globalOrdinals)); + assertNotSame(cachedInstance, globalOrdinals); + assertThat(cachedInstance.getOrdinalMap(), sameInstance(globalOrdinals.getOrdinalMap())); topLevelReader.close(); // Now only 3 segment level entries, only the toplevel reader has been closed, but the segment readers are still used by IW assertThat(indicesFieldDataCache.getCache().weight(), equalTo(hasDocValues() ? 
0L : 3L)); diff --git a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java index 79d2f004cc95a..c94c289f51fb1 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java @@ -59,7 +59,6 @@ import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.indices.recovery.RecoveryTarget; -import org.elasticsearch.test.junit.annotations.TestLogging; import java.io.IOException; import java.util.ArrayList; @@ -228,7 +227,6 @@ public void testRecoveryToReplicaThatReceivedExtraDocument() throws Exception { } } - @TestLogging("org.elasticsearch.index.shard:TRACE,org.elasticsearch.indices.recovery:TRACE") public void testRecoveryAfterPrimaryPromotion() throws Exception { try (ReplicationGroup shards = createGroup(2)) { shards.startAll(); @@ -365,7 +363,6 @@ public void testReplicaRollbackStaleDocumentsInPeerRecovery() throws Exception { } } - @TestLogging("org.elasticsearch.index.shard:TRACE,org.elasticsearch.action.resync:TRACE") public void testResyncAfterPrimaryPromotion() throws Exception { // TODO: check translog trimming functionality once rollback is implemented in Lucene (ES trimming is done) Map mappings = @@ -522,16 +519,6 @@ public void finalizeRecovery(long globalCheckpoint, ActionListener listene } } - @TestLogging( - "_root:DEBUG," - + "org.elasticsearch.action.bulk:TRACE," - + "org.elasticsearch.action.get:TRACE," - + "org.elasticsearch.cluster.service:TRACE," - + "org.elasticsearch.discovery:TRACE," - + "org.elasticsearch.indices.cluster:TRACE," - + "org.elasticsearch.indices.recovery:TRACE," - + "org.elasticsearch.index.seqno:TRACE," - + "org.elasticsearch.index.shard:TRACE") public void testCheckpointsAndMarkingInSync() throws Exception { final IndexMetaData metaData = buildIndexMetaData(0); final BlockingEngineFactory replicaEngineFactory = new BlockingEngineFactory(); @@ -609,10 +596,10 @@ public void indexTranslogOperations( final long expectedDocs = docs + 2L; assertThat(shards.getPrimary().getLocalCheckpoint(), equalTo(expectedDocs - 1)); // recovery has not completed, therefore the global checkpoint can have advanced on the primary - assertThat(shards.getPrimary().getGlobalCheckpoint(), equalTo(expectedDocs - 1)); + assertThat(shards.getPrimary().getLastKnownGlobalCheckpoint(), equalTo(expectedDocs - 1)); // the pending document is not done, the checkpoints can not have advanced on the replica assertThat(replica.getLocalCheckpoint(), lessThan(expectedDocs - 1)); - assertThat(replica.getGlobalCheckpoint(), lessThan(expectedDocs - 1)); + assertThat(replica.getLastKnownGlobalCheckpoint(), lessThan(expectedDocs - 1)); } // wait for recovery to enter the translog phase @@ -625,9 +612,9 @@ public void indexTranslogOperations( final long expectedDocs = docs + 3L; assertThat(shards.getPrimary().getLocalCheckpoint(), equalTo(expectedDocs - 1)); // recovery is now in the process of being completed, therefore the global checkpoint can not have advanced on the primary - assertThat(shards.getPrimary().getGlobalCheckpoint(), equalTo(expectedDocs - 2)); + assertThat(shards.getPrimary().getLastKnownGlobalCheckpoint(), equalTo(expectedDocs - 2)); assertThat(replica.getLocalCheckpoint(), lessThan(expectedDocs - 2)); - 
assertThat(replica.getGlobalCheckpoint(), lessThan(expectedDocs - 2)); + assertThat(replica.getLastKnownGlobalCheckpoint(), lessThan(expectedDocs - 2)); } replicaEngineFactory.releaseLatchedIndexers(); @@ -637,10 +624,10 @@ public void indexTranslogOperations( final long expectedDocs = docs + 3L; assertBusy(() -> { assertThat(shards.getPrimary().getLocalCheckpoint(), equalTo(expectedDocs - 1)); - assertThat(shards.getPrimary().getGlobalCheckpoint(), equalTo(expectedDocs - 1)); + assertThat(shards.getPrimary().getLastKnownGlobalCheckpoint(), equalTo(expectedDocs - 1)); assertThat(replica.getLocalCheckpoint(), equalTo(expectedDocs - 1)); // the global checkpoint can only advance here if a background global checkpoint sync fires - assertThat(replica.getGlobalCheckpoint(), anyOf(equalTo(expectedDocs - 1), equalTo(expectedDocs - 2))); + assertThat(replica.getLastKnownGlobalCheckpoint(), anyOf(equalTo(expectedDocs - 1), equalTo(expectedDocs - 2))); }); } } @@ -775,7 +762,7 @@ public void testRollbackOnPromotion() throws Exception { } shards.refresh("test"); List docsBelowGlobalCheckpoint = EngineTestCase.getDocIds(getEngine(newPrimary), randomBoolean()) - .stream().filter(doc -> doc.getSeqNo() <= newPrimary.getGlobalCheckpoint()).collect(Collectors.toList()); + .stream().filter(doc -> doc.getSeqNo() <= newPrimary.getLastKnownGlobalCheckpoint()).collect(Collectors.toList()); CountDownLatch latch = new CountDownLatch(1); final AtomicBoolean done = new AtomicBoolean(); Thread thread = new Thread(() -> { diff --git a/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java b/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java index cec3c05b28438..79b9b231b48d4 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java @@ -100,7 +100,7 @@ public void testTranslogSyncAfterGlobalCheckpointSync() throws Exception { lastSyncedGlobalCheckpoint = globalCheckpoint; } - when(indexShard.getGlobalCheckpoint()).thenReturn(globalCheckpoint); + when(indexShard.getLastKnownGlobalCheckpoint()).thenReturn(globalCheckpoint); when(indexShard.getLastSyncedGlobalCheckpoint()).thenReturn(lastSyncedGlobalCheckpoint); final GlobalCheckpointSyncAction action = new GlobalCheckpointSyncAction( diff --git a/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java b/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java index 4f4f39c614687..8d148a74ea989 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java @@ -30,6 +30,8 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.translog.Translog; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalSettingsPlugin; @@ -58,6 +60,28 @@ protected Collection> nodePlugins() { .collect(Collectors.toList()); } + public void testGlobalCheckpointSyncWithAsyncDurability() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(2); + prepareCreate( + "test", + Settings.builder() + .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "1s")
.put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC) + .put(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey(), "1s") + .put("index.number_of_replicas", 1)) + .get(); + + for (int j = 0; j < 10; j++) { + final String id = Integer.toString(j); + client().prepareIndex("test", "test", id).setSource("{\"foo\": " + id + "}", XContentType.JSON).get(); + } + + assertBusy(() -> { + SeqNoStats seqNoStats = client().admin().indices().prepareStats("test").get().getIndex("test").getShards()[0].getSeqNoStats(); + assertThat(seqNoStats.getGlobalCheckpoint(), equalTo(seqNoStats.getMaxSeqNo())); + }); + } + public void testPostOperationGlobalCheckpointSync() throws Exception { // set the sync interval high so it does not execute during this test runGlobalCheckpointSyncTest(TimeValue.timeValueHours(24), client -> {}, client -> {}); diff --git a/server/src/test/java/org/elasticsearch/index/seqno/LocalCheckpointTrackerTests.java b/server/src/test/java/org/elasticsearch/index/seqno/LocalCheckpointTrackerTests.java index 44b3794ea6d42..a11e29097cc48 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/LocalCheckpointTrackerTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/LocalCheckpointTrackerTests.java @@ -55,43 +55,79 @@ public void setUp() throws Exception { tracker = createEmptyTracker(); } - public void testSimplePrimary() { + public void testSimplePrimaryProcessed() { long seqNo1, seqNo2; - assertThat(tracker.getCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); + assertThat(tracker.getProcessedCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); seqNo1 = tracker.generateSeqNo(); assertThat(seqNo1, equalTo(0L)); - tracker.markSeqNoAsCompleted(seqNo1); - assertThat(tracker.getCheckpoint(), equalTo(0L)); - assertThat(tracker.contains(0L), equalTo(true)); - assertThat(tracker.contains(atLeast(1)), equalTo(false)); + tracker.markSeqNoAsProcessed(seqNo1); + assertThat(tracker.getProcessedCheckpoint(), equalTo(0L)); + assertThat(tracker.hasProcessed(0L), equalTo(true)); + assertThat(tracker.hasProcessed(atLeast(1)), equalTo(false)); seqNo1 = tracker.generateSeqNo(); seqNo2 = tracker.generateSeqNo(); assertThat(seqNo1, equalTo(1L)); assertThat(seqNo2, equalTo(2L)); - tracker.markSeqNoAsCompleted(seqNo2); - assertThat(tracker.getCheckpoint(), equalTo(0L)); - assertThat(tracker.contains(seqNo1), equalTo(false)); - assertThat(tracker.contains(seqNo2), equalTo(true)); - tracker.markSeqNoAsCompleted(seqNo1); - assertThat(tracker.getCheckpoint(), equalTo(2L)); - assertThat(tracker.contains(between(0, 2)), equalTo(true)); - assertThat(tracker.contains(atLeast(3)), equalTo(false)); + tracker.markSeqNoAsProcessed(seqNo2); + assertThat(tracker.getProcessedCheckpoint(), equalTo(0L)); + assertThat(tracker.hasProcessed(seqNo1), equalTo(false)); + assertThat(tracker.hasProcessed(seqNo2), equalTo(true)); + tracker.markSeqNoAsProcessed(seqNo1); + assertThat(tracker.getProcessedCheckpoint(), equalTo(2L)); + assertThat(tracker.hasProcessed(between(0, 2)), equalTo(true)); + assertThat(tracker.hasProcessed(atLeast(3)), equalTo(false)); + assertThat(tracker.getPersistedCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); + assertThat(tracker.getMaxSeqNo(), equalTo(2L)); } - public void testSimpleReplica() { - assertThat(tracker.getCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); - assertThat(tracker.contains(randomNonNegativeLong()), equalTo(false)); - tracker.markSeqNoAsCompleted(0L); - assertThat(tracker.getCheckpoint(), 
equalTo(0L)); - assertThat(tracker.contains(0), equalTo(true)); - tracker.markSeqNoAsCompleted(2L); - assertThat(tracker.getCheckpoint(), equalTo(0L)); - assertThat(tracker.contains(1L), equalTo(false)); - assertThat(tracker.contains(2L), equalTo(true)); - tracker.markSeqNoAsCompleted(1L); - assertThat(tracker.getCheckpoint(), equalTo(2L)); - assertThat(tracker.contains(between(0, 2)), equalTo(true)); - assertThat(tracker.contains(atLeast(3)), equalTo(false)); + public void testSimplePrimaryPersisted() { + long seqNo1, seqNo2; + assertThat(tracker.getPersistedCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); + seqNo1 = tracker.generateSeqNo(); + assertThat(seqNo1, equalTo(0L)); + tracker.markSeqNoAsPersisted(seqNo1); + assertThat(tracker.getPersistedCheckpoint(), equalTo(0L)); + seqNo1 = tracker.generateSeqNo(); + seqNo2 = tracker.generateSeqNo(); + assertThat(seqNo1, equalTo(1L)); + assertThat(seqNo2, equalTo(2L)); + tracker.markSeqNoAsPersisted(seqNo2); + assertThat(tracker.getPersistedCheckpoint(), equalTo(0L)); + tracker.markSeqNoAsPersisted(seqNo1); + assertThat(tracker.getPersistedCheckpoint(), equalTo(2L)); + assertThat(tracker.getProcessedCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); + assertThat(tracker.getMaxSeqNo(), equalTo(2L)); + } + + public void testSimpleReplicaProcessed() { + assertThat(tracker.getProcessedCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); + assertThat(tracker.hasProcessed(randomNonNegativeLong()), equalTo(false)); + tracker.markSeqNoAsProcessed(0L); + assertThat(tracker.getProcessedCheckpoint(), equalTo(0L)); + assertThat(tracker.hasProcessed(0), equalTo(true)); + tracker.markSeqNoAsProcessed(2L); + assertThat(tracker.getProcessedCheckpoint(), equalTo(0L)); + assertThat(tracker.hasProcessed(1L), equalTo(false)); + assertThat(tracker.hasProcessed(2L), equalTo(true)); + tracker.markSeqNoAsProcessed(1L); + assertThat(tracker.getProcessedCheckpoint(), equalTo(2L)); + assertThat(tracker.hasProcessed(between(0, 2)), equalTo(true)); + assertThat(tracker.hasProcessed(atLeast(3)), equalTo(false)); + assertThat(tracker.getPersistedCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); + assertThat(tracker.getMaxSeqNo(), equalTo(2L)); + } + + public void testSimpleReplicaPersisted() { + assertThat(tracker.getPersistedCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); + assertThat(tracker.hasProcessed(randomNonNegativeLong()), equalTo(false)); + tracker.markSeqNoAsPersisted(0L); + assertThat(tracker.getPersistedCheckpoint(), equalTo(0L)); + tracker.markSeqNoAsPersisted(2L); + assertThat(tracker.getPersistedCheckpoint(), equalTo(0L)); + tracker.markSeqNoAsPersisted(1L); + assertThat(tracker.getPersistedCheckpoint(), equalTo(2L)); + assertThat(tracker.getProcessedCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); + assertThat(tracker.getMaxSeqNo(), equalTo(2L)); } public void testLazyInitialization() { @@ -100,10 +136,10 @@ public void testLazyInitialization() { * sequence numbers this could lead to excessive memory usage resulting in out of memory errors. 
*/ long seqNo = randomNonNegativeLong(); - tracker.markSeqNoAsCompleted(seqNo); + tracker.markSeqNoAsProcessed(seqNo); assertThat(tracker.processedSeqNo.size(), equalTo(1)); - assertThat(tracker.contains(seqNo), equalTo(true)); - assertThat(tracker.contains(randomValueOtherThan(seqNo, ESTestCase::randomNonNegativeLong)), equalTo(false)); + assertThat(tracker.hasProcessed(seqNo), equalTo(true)); + assertThat(tracker.hasProcessed(randomValueOtherThan(seqNo, ESTestCase::randomNonNegativeLong)), equalTo(false)); assertThat(tracker.processedSeqNo.size(), equalTo(1)); } @@ -117,16 +153,16 @@ public void testSimpleOverFlow() { } Collections.shuffle(seqNoList, random()); for (Long seqNo : seqNoList) { - tracker.markSeqNoAsCompleted(seqNo); + tracker.markSeqNoAsProcessed(seqNo); } - assertThat(tracker.checkpoint, equalTo(maxOps - 1L)); + assertThat(tracker.processedCheckpoint.get(), equalTo(maxOps - 1L)); assertThat(tracker.processedSeqNo.size(), equalTo(aligned ? 0 : 1)); if (aligned == false) { - assertThat(tracker.processedSeqNo.keys().iterator().next().value, equalTo(tracker.checkpoint / BIT_SET_SIZE)); + assertThat(tracker.processedSeqNo.keys().iterator().next().value, equalTo(tracker.processedCheckpoint.get() / BIT_SET_SIZE)); } - assertThat(tracker.contains(randomFrom(seqNoList)), equalTo(true)); + assertThat(tracker.hasProcessed(randomFrom(seqNoList)), equalTo(true)); final long notCompletedSeqNo = randomValueOtherThanMany(seqNoList::contains, ESTestCase::randomNonNegativeLong); - assertThat(tracker.contains(notCompletedSeqNo), equalTo(false)); + assertThat(tracker.hasProcessed(notCompletedSeqNo), equalTo(false)); } public void testConcurrentPrimary() throws InterruptedException { @@ -151,7 +187,7 @@ protected void doRun() throws Exception { long seqNo = tracker.generateSeqNo(); logger.info("[t{}] started [{}]", threadId, seqNo); if (seqNo != unFinishedSeq) { - tracker.markSeqNoAsCompleted(seqNo); + tracker.markSeqNoAsProcessed(seqNo); logger.info("[t{}] completed [{}]", threadId, seqNo); } } @@ -163,12 +199,12 @@ protected void doRun() throws Exception { thread.join(); } assertThat(tracker.getMaxSeqNo(), equalTo(maxOps - 1L)); - assertThat(tracker.getCheckpoint(), equalTo(unFinishedSeq - 1L)); - tracker.markSeqNoAsCompleted(unFinishedSeq); - assertThat(tracker.getCheckpoint(), equalTo(maxOps - 1L)); + assertThat(tracker.getProcessedCheckpoint(), equalTo(unFinishedSeq - 1L)); + tracker.markSeqNoAsProcessed(unFinishedSeq); + assertThat(tracker.getProcessedCheckpoint(), equalTo(maxOps - 1L)); assertThat(tracker.processedSeqNo.size(), isOneOf(0, 1)); if (tracker.processedSeqNo.size() == 1) { - assertThat(tracker.processedSeqNo.keys().iterator().next().value, equalTo(tracker.checkpoint / BIT_SET_SIZE)); + assertThat(tracker.processedSeqNo.keys().iterator().next().value, equalTo(tracker.processedCheckpoint.get() / BIT_SET_SIZE)); } } @@ -202,7 +238,7 @@ protected void doRun() throws Exception { Integer[] ops = seqNoPerThread[threadId]; for (int seqNo : ops) { if (seqNo != unFinishedSeq) { - tracker.markSeqNoAsCompleted(seqNo); + tracker.markSeqNoAsProcessed(seqNo); logger.info("[t{}] completed [{}]", threadId, seqNo); } } @@ -214,15 +250,15 @@ protected void doRun() throws Exception { thread.join(); } assertThat(tracker.getMaxSeqNo(), equalTo(maxOps - 1L)); - assertThat(tracker.getCheckpoint(), equalTo(unFinishedSeq - 1L)); - assertThat(tracker.contains(unFinishedSeq), equalTo(false)); - tracker.markSeqNoAsCompleted(unFinishedSeq); - assertThat(tracker.getCheckpoint(), equalTo(maxOps - 1L)); - 
assertThat(tracker.contains(unFinishedSeq), equalTo(true)); - assertThat(tracker.contains(randomLongBetween(maxOps, Long.MAX_VALUE)), equalTo(false)); + assertThat(tracker.getProcessedCheckpoint(), equalTo(unFinishedSeq - 1L)); + assertThat(tracker.hasProcessed(unFinishedSeq), equalTo(false)); + tracker.markSeqNoAsProcessed(unFinishedSeq); + assertThat(tracker.getProcessedCheckpoint(), equalTo(maxOps - 1L)); + assertThat(tracker.hasProcessed(unFinishedSeq), equalTo(true)); + assertThat(tracker.hasProcessed(randomLongBetween(maxOps, Long.MAX_VALUE)), equalTo(false)); assertThat(tracker.processedSeqNo.size(), isOneOf(0, 1)); if (tracker.processedSeqNo.size() == 1) { - assertThat(tracker.processedSeqNo.keys().iterator().next().value, equalTo(tracker.checkpoint / BIT_SET_SIZE)); + assertThat(tracker.processedSeqNo.keys().iterator().next().value, equalTo(tracker.processedCheckpoint.get() / BIT_SET_SIZE)); } } @@ -234,7 +270,7 @@ public void testWaitForOpsToComplete() throws BrokenBarrierException, Interrupte try { // synchronize starting with the test thread barrier.await(); - tracker.waitForOpsToComplete(seqNo); + tracker.waitForProcessedOpsToComplete(seqNo); complete.set(true); // synchronize with the test thread checking if we are no longer waiting barrier.await(); @@ -251,11 +287,11 @@ public void testWaitForOpsToComplete() throws BrokenBarrierException, Interrupte final List elements = IntStream.rangeClosed(0, seqNo).boxed().collect(Collectors.toList()); Randomness.shuffle(elements); for (int i = 0; i < elements.size() - 1; i++) { - tracker.markSeqNoAsCompleted(elements.get(i)); + tracker.markSeqNoAsProcessed(elements.get(i)); assertFalse(complete.get()); } - tracker.markSeqNoAsCompleted(elements.get(elements.size() - 1)); + tracker.markSeqNoAsProcessed(elements.get(elements.size() - 1)); // synchronize with the waiting thread to mark that it is complete barrier.await(); assertTrue(complete.get()); @@ -268,17 +304,17 @@ public void testContains() { final long localCheckpoint = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, maxSeqNo); final LocalCheckpointTracker tracker = new LocalCheckpointTracker(maxSeqNo, localCheckpoint); if (localCheckpoint >= 0) { - assertThat(tracker.contains(randomLongBetween(0, localCheckpoint)), equalTo(true)); + assertThat(tracker.hasProcessed(randomLongBetween(0, localCheckpoint)), equalTo(true)); } - assertThat(tracker.contains(randomLongBetween(localCheckpoint + 1, Long.MAX_VALUE)), equalTo(false)); + assertThat(tracker.hasProcessed(randomLongBetween(localCheckpoint + 1, Long.MAX_VALUE)), equalTo(false)); final int numOps = between(1, 100); final List seqNos = new ArrayList<>(); for (int i = 0; i < numOps; i++) { long seqNo = randomLongBetween(0, 1000); seqNos.add(seqNo); - tracker.markSeqNoAsCompleted(seqNo); + tracker.markSeqNoAsProcessed(seqNo); } final long seqNo = randomNonNegativeLong(); - assertThat(tracker.contains(seqNo), equalTo(seqNo <= localCheckpoint || seqNos.contains(seqNo))); + assertThat(tracker.hasProcessed(seqNo), equalTo(seqNo <= localCheckpoint || seqNos.contains(seqNo))); } } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java index d0bc06ab1cc7f..b07e33d0305bd 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -81,7 +81,6 @@ import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.IndexSettingsModule;
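// The LocalCheckpointTracker tests above pin down two independent checkpoints over one
// seq# space, each advancing only across a gap-free prefix of marked operations, plus
// the hasProcessed() membership query. A deliberately tiny, self-contained model of
// exactly that behaviour (hypothetical class — the real tracker pages its bit sets,
// supports the full long range, and offers waitForProcessedOpsToComplete(); this sketch
// uses an int-sized seq# space for brevity):
import java.util.BitSet;

final class MiniCheckpointTracker {
    private final BitSet processedSeqNos = new BitSet();
    private final BitSet persistedSeqNos = new BitSet();
    long processedCheckpoint = -1L; // -1 == SequenceNumbers.NO_OPS_PERFORMED
    long persistedCheckpoint = -1L;
    long maxSeqNo = -1L;

    synchronized void markSeqNoAsProcessed(long seqNo) {
        maxSeqNo = Math.max(maxSeqNo, seqNo);
        processedSeqNos.set((int) seqNo);
        while (processedSeqNos.get((int) (processedCheckpoint + 1L))) {
            processedCheckpoint++; // the checkpoint only advances over a gap-free prefix
        }
    }

    synchronized void markSeqNoAsPersisted(long seqNo) {
        maxSeqNo = Math.max(maxSeqNo, seqNo);
        persistedSeqNos.set((int) seqNo);
        while (persistedSeqNos.get((int) (persistedCheckpoint + 1L))) {
            persistedCheckpoint++;
        }
    }

    synchronized boolean hasProcessed(long seqNo) {
        return seqNo <= processedCheckpoint || processedSeqNos.get((int) seqNo);
    }

    public static void main(String[] args) {
        MiniCheckpointTracker tracker = new MiniCheckpointTracker();
        tracker.markSeqNoAsProcessed(0L);
        tracker.markSeqNoAsProcessed(2L);
        assert tracker.processedCheckpoint == 0L;  // the gap at seq# 1 blocks the checkpoint
        assert !tracker.hasProcessed(1L) && tracker.hasProcessed(2L);
        tracker.markSeqNoAsProcessed(1L);
        assert tracker.processedCheckpoint == 2L;  // same shape as testSimpleReplicaProcessed
        assert tracker.persistedCheckpoint == -1L; // persisted progress is tracked independently
    }
}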
import org.elasticsearch.test.InternalSettingsPlugin; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPoolStats; import org.junit.Assert; @@ -424,7 +423,6 @@ public void testMaybeRollTranslogGeneration() throws Exception { } } - @TestLogging("org.elasticsearch.index.shard:TRACE,org.elasticsearch.index.engine:TRACE") public void testStressMaybeFlushOrRollTranslogGeneration() throws Exception { createIndex("test"); ensureGreen(); diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 5187ef37fcdf8..ded420b772266 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -419,7 +419,7 @@ public void testPrimaryPromotionDelaysOperations() throws IOException, BrokenBar } indexShard.acquireReplicaOperationPermit( indexShard.getPendingPrimaryTerm(), - indexShard.getGlobalCheckpoint(), + indexShard.getLastKnownGlobalCheckpoint(), indexShard.getMaxSeqNoOfUpdatesOrDeletes(), new ActionListener() { @Override @@ -716,7 +716,7 @@ public void onFailure(final Exception e) { if (Assertions.ENABLED && indexShard.routingEntry().isRelocationTarget() == false) { assertThat(expectThrows(AssertionError.class, () -> indexShard.acquireReplicaOperationPermit(pendingPrimaryTerm, - indexShard.getGlobalCheckpoint(), indexShard.getMaxSeqNoOfUpdatesOrDeletes(), new ActionListener() { + indexShard.getLastKnownGlobalCheckpoint(), indexShard.getMaxSeqNoOfUpdatesOrDeletes(), new ActionListener() { @Override public void onResponse(Releasable releasable) { fail(); @@ -842,7 +842,7 @@ private Releasable acquirePrimaryOperationPermitBlockingly(IndexShard indexShard private Releasable acquireReplicaOperationPermitBlockingly(IndexShard indexShard, long opPrimaryTerm) throws ExecutionException, InterruptedException { PlainActionFuture fut = new PlainActionFuture<>(); - indexShard.acquireReplicaOperationPermit(opPrimaryTerm, indexShard.getGlobalCheckpoint(), + indexShard.acquireReplicaOperationPermit(opPrimaryTerm, indexShard.getLastKnownGlobalCheckpoint(), randomNonNegativeLong(), fut, ThreadPool.Names.WRITE, ""); return fut.get(); } @@ -921,18 +921,19 @@ public void testOperationPermitOnReplicaShards() throws Exception { final long newPrimaryTerm = primaryTerm + 1 + randomInt(20); if (engineClosed == false) { assertThat(indexShard.getLocalCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); - assertThat(indexShard.getGlobalCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); + assertThat(indexShard.getLastKnownGlobalCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); } final long newGlobalCheckPoint; if (engineClosed || randomBoolean()) { newGlobalCheckPoint = SequenceNumbers.NO_OPS_PERFORMED; } else { - long localCheckPoint = indexShard.getGlobalCheckpoint() + randomInt(100); + long localCheckPoint = indexShard.getLastKnownGlobalCheckpoint() + randomInt(100); // advance local checkpoint for (int i = 0; i <= localCheckPoint; i++) { indexShard.markSeqNoAsNoop(i, "dummy doc"); } - newGlobalCheckPoint = randomIntBetween((int) indexShard.getGlobalCheckpoint(), (int) localCheckPoint); + indexShard.sync(); // advance local checkpoint + newGlobalCheckPoint = randomIntBetween((int) indexShard.getLastKnownGlobalCheckpoint(), (int) localCheckPoint); } final long expectedLocalCheckpoint; if 
(newGlobalCheckPoint == UNASSIGNED_SEQ_NO) { @@ -953,7 +954,7 @@ public void onResponse(Releasable releasable) { assertThat(indexShard.getPendingPrimaryTerm(), equalTo(newPrimaryTerm)); assertThat(TestTranslog.getCurrentTerm(getTranslog(indexShard)), equalTo(newPrimaryTerm)); assertThat(indexShard.getLocalCheckpoint(), equalTo(expectedLocalCheckpoint)); - assertThat(indexShard.getGlobalCheckpoint(), equalTo(newGlobalCheckPoint)); + assertThat(indexShard.getLastKnownGlobalCheckpoint(), equalTo(newGlobalCheckPoint)); onResponse.set(true); releasable.close(); finish(); @@ -1022,7 +1023,7 @@ private void finish() { // and one after replaying translog (upto the global checkpoint); otherwise we roll translog once. either(equalTo(translogGen + 1)).or(equalTo(translogGen + 2))); assertThat(indexShard.getLocalCheckpoint(), equalTo(expectedLocalCheckpoint)); - assertThat(indexShard.getGlobalCheckpoint(), equalTo(newGlobalCheckPoint)); + assertThat(indexShard.getLastKnownGlobalCheckpoint(), equalTo(newGlobalCheckPoint)); } } thread.join(); @@ -1050,7 +1051,7 @@ public void onFailure(Exception e) { }; final long oldPrimaryTerm = indexShard.getPendingPrimaryTerm() - 1; - randomReplicaOperationPermitAcquisition(indexShard, oldPrimaryTerm, indexShard.getGlobalCheckpoint(), + randomReplicaOperationPermitAcquisition(indexShard, oldPrimaryTerm, indexShard.getLastKnownGlobalCheckpoint(), randomNonNegativeLong(), onLockAcquired, ""); latch.await(); assertFalse(onResponse.get()); @@ -1071,7 +1072,7 @@ public void testAcquireReplicaPermitAdvanceMaxSeqNoOfUpdates() throws Exception long newMaxSeqNoOfUpdates = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); PlainActionFuture fut = new PlainActionFuture<>(); - randomReplicaOperationPermitAcquisition(replica, replica.getOperationPrimaryTerm(), replica.getGlobalCheckpoint(), + randomReplicaOperationPermitAcquisition(replica, replica.getOperationPrimaryTerm(), replica.getLastKnownGlobalCheckpoint(), newMaxSeqNoOfUpdates, fut, ""); try (Releasable ignored = fut.actionGet()) { assertThat(replica.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(Math.max(currentMaxSeqNoOfUpdates, newMaxSeqNoOfUpdates))); @@ -1116,18 +1117,18 @@ public void testGlobalCheckpointSync() throws IOException { primaryShard.updateLocalCheckpointForShard(replicaAllocationId, replicaLocalCheckpoint); // initialize the local knowledge on the primary of the global checkpoint on the replica shard - final int replicaGlobalCheckpoint = - randomIntBetween(Math.toIntExact(SequenceNumbers.NO_OPS_PERFORMED), Math.toIntExact(primaryShard.getGlobalCheckpoint())); + final int replicaGlobalCheckpoint = randomIntBetween(Math.toIntExact(SequenceNumbers.NO_OPS_PERFORMED), + Math.toIntExact(primaryShard.getLastKnownGlobalCheckpoint())); primaryShard.updateGlobalCheckpointForShard(replicaAllocationId, replicaGlobalCheckpoint); // simulate a background maybe sync; it should only run if the knowledge on the replica of the global checkpoint lags the primary primaryShard.maybeSyncGlobalCheckpoint("test"); assertThat( synced.get(), - equalTo(maxSeqNo == primaryShard.getGlobalCheckpoint() && (replicaGlobalCheckpoint < checkpoint))); + equalTo(maxSeqNo == primaryShard.getLastKnownGlobalCheckpoint() && (replicaGlobalCheckpoint < checkpoint))); // simulate that the background sync advanced the global checkpoint on the replica - primaryShard.updateGlobalCheckpointForShard(replicaAllocationId, primaryShard.getGlobalCheckpoint()); + primaryShard.updateGlobalCheckpointForShard(replicaAllocationId, 
primaryShard.getLastKnownGlobalCheckpoint()); // reset our boolean so that we can assert after another simulated maybe sync synced.set(false); @@ -1288,7 +1289,7 @@ public void testConcurrentTermIncreaseOnReplicaShard() throws BrokenBarrierExcep } indexShard.acquireReplicaOperationPermit( primaryTerm + increment, - indexShard.getGlobalCheckpoint(), + indexShard.getLastKnownGlobalCheckpoint(), randomNonNegativeLong(), new ActionListener() { @Override @@ -1983,6 +1984,7 @@ public void testRecoverFromStoreWithOutOfOrderDelete() throws IOException { new SourceToParse(shard.shardId().getIndexName(), "_doc", "id-2", new BytesArray("{}"), XContentType.JSON)); shard.applyIndexOperationOnReplica(5, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, new SourceToParse(shard.shardId().getIndexName(), "_doc", "id-5", new BytesArray("{}"), XContentType.JSON)); + shard.sync(); // advance local checkpoint final int translogOps; if (randomBoolean()) { @@ -2914,6 +2916,8 @@ public void testDocStats() throws Exception { // Need to update and sync the global checkpoint as the soft-deletes retention MergePolicy depends on it. if (indexShard.indexSettings.isSoftDeleteEnabled()) { if (indexShard.routingEntry().primary()) { + indexShard.updateLocalCheckpointForShard(indexShard.routingEntry().allocationId().getId(), + indexShard.getLocalCheckpoint()); indexShard.updateGlobalCheckpointForShard(indexShard.routingEntry().allocationId().getId(), indexShard.getLocalCheckpoint()); } else { @@ -3299,6 +3303,7 @@ private Result indexOnReplicaWithGaps( indexShard.flush(new FlushRequest()); } } + indexShard.sync(); // advance local checkpoint assert localCheckpoint == indexShard.getLocalCheckpoint(); assert !gap || (localCheckpoint != max); return new Result(localCheckpoint, max); @@ -3746,7 +3751,7 @@ public void testResetEngine() throws Exception { IndexShard shard = newStartedShard(false); indexOnReplicaWithGaps(shard, between(0, 1000), Math.toIntExact(shard.getLocalCheckpoint())); long maxSeqNoBeforeRollback = shard.seqNoStats().getMaxSeqNo(); - final long globalCheckpoint = randomLongBetween(shard.getGlobalCheckpoint(), shard.getLocalCheckpoint()); + final long globalCheckpoint = randomLongBetween(shard.getLastKnownGlobalCheckpoint(), shard.getLocalCheckpoint()); shard.updateGlobalCheckpointOnReplica(globalCheckpoint, "test"); Set docBelowGlobalCheckpoint = getShardDocUIDs(shard).stream() .filter(id -> Long.parseLong(id) <= globalCheckpoint).collect(Collectors.toSet()); @@ -3830,7 +3835,7 @@ public InternalEngine recoverFromTranslog(TranslogRecoveryRunner translogRecover closeShardThread.start(); final CountDownLatch engineResetLatch = new CountDownLatch(1); - shard.acquireAllReplicaOperationsPermits(shard.getOperationPrimaryTerm(), shard.getGlobalCheckpoint(), 0L, + shard.acquireAllReplicaOperationsPermits(shard.getOperationPrimaryTerm(), shard.getLastKnownGlobalCheckpoint(), 0L, ActionListener.wrap(r -> { try (r) { shard.resetEngineToGlobalCheckpoint(); @@ -3870,7 +3875,7 @@ public InternalEngine recoverFromTranslog(TranslogRecoveryRunner translogRecover }); indexOnReplicaWithGaps(shard, between(0, 1000), Math.toIntExact(shard.getLocalCheckpoint())); - final long globalCheckpoint = randomLongBetween(shard.getGlobalCheckpoint(), shard.getLocalCheckpoint()); + final long globalCheckpoint = randomLongBetween(shard.getLastKnownGlobalCheckpoint(), shard.getLocalCheckpoint()); shard.updateGlobalCheckpointOnReplica(globalCheckpoint, "test"); Thread snapshotThread = new Thread(() -> { @@ -3893,7 +3898,7 @@ public 
InternalEngine recoverFromTranslog(TranslogRecoveryRunner translogRecover snapshotThread.start(); final CountDownLatch engineResetLatch = new CountDownLatch(1); - shard.acquireAllReplicaOperationsPermits(shard.getOperationPrimaryTerm(), shard.getGlobalCheckpoint(), 0L, + shard.acquireAllReplicaOperationsPermits(shard.getOperationPrimaryTerm(), shard.getLastKnownGlobalCheckpoint(), 0L, ActionListener.wrap(r -> { try (r) { shard.resetEngineToGlobalCheckpoint(); @@ -3917,7 +3922,7 @@ public void testConcurrentAcquireAllReplicaOperationsPermitsWithPrimaryTermUpdat for (int i = 0; i < nbTermUpdates; i++) { long opPrimaryTerm = replica.getOperationPrimaryTerm() + 1; - final long globalCheckpoint = replica.getGlobalCheckpoint(); + final long globalCheckpoint = replica.getLastKnownGlobalCheckpoint(); final long maxSeqNoOfUpdatesOrDeletes = replica.getMaxSeqNoOfUpdatesOrDeletes(); final int operations = scaledRandomIntBetween(5, 32); diff --git a/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java b/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java index e0825445bb8c2..481aaa233caed 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java @@ -101,7 +101,7 @@ public void testSyncerSendsOffCorrectDocuments() throws Exception { shard.updateShardState(shard.routingEntry(), shard.getPendingPrimaryTerm(), null, 1000L, Collections.singleton(allocationId), new IndexShardRoutingTable.Builder(shard.shardId()).addShard(shard.routingEntry()).build()); shard.updateLocalCheckpointForShard(allocationId, globalCheckPoint); - assertEquals(globalCheckPoint, shard.getGlobalCheckpoint()); + assertEquals(globalCheckPoint, shard.getLastKnownGlobalCheckpoint()); logger.info("Total ops: {}, global checkpoint: {}", numDocs, globalCheckPoint); @@ -197,7 +197,7 @@ public void onResponse(PrimaryReplicaSyncer.ResyncTask result) { public void testDoNotSendOperationsWithoutSequenceNumber() throws Exception { IndexShard shard = spy(newStartedShard(true)); - when(shard.getGlobalCheckpoint()).thenReturn(SequenceNumbers.UNASSIGNED_SEQ_NO); + when(shard.getLastKnownGlobalCheckpoint()).thenReturn(SequenceNumbers.UNASSIGNED_SEQ_NO); int numOps = between(0, 20); List operations = new ArrayList<>(); for (int i = 0; i < numOps; i++) { diff --git a/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java b/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java index cd0c90f50779c..3ca29b6b375b0 100644 --- a/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -160,7 +160,9 @@ public void testCorruptFileAndRecover() throws ExecutionException, InterruptedEx } indexRandom(true, builders); ensureGreen(); - assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).execute().actionGet()); + // double flush to create safe commit in case of async durability + assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).get()); + assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).get()); // we have to flush at least once here since we don't corrupt the translog SearchResponse countResponse = client().prepareSearch().setSize(0).get(); assertHitCount(countResponse, numDocs); @@ -264,7 +266,9 @@ public void testCorruptPrimaryNoReplica() throws ExecutionException, Interrupted } 
indexRandom(true, builders); ensureGreen(); - assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).execute().actionGet()); + // double flush to create safe commit in case of async durability + assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).get()); + assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).get()); // we have to flush at least once here since we don't corrupt the translog SearchResponse countResponse = client().prepareSearch().setSize(0).get(); assertHitCount(countResponse, numDocs); diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java index c8d4dbd43df2f..da339ff5c8ec0 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java @@ -171,7 +171,7 @@ private Tuple, TranslogWriter> createReadersAndWriter(final } writer = TranslogWriter.create(new ShardId("index", "uuid", 0), translogUUID, gen, tempDir.resolve(Translog.getFilename(gen)), FileChannel::open, TranslogConfig.DEFAULT_BUFFER_SIZE, 1L, 1L, () -> 1L, - () -> 1L, randomNonNegativeLong(), new TragicExceptionHolder()); + () -> 1L, randomNonNegativeLong(), new TragicExceptionHolder(), seqNo -> {}); writer = Mockito.spy(writer); Mockito.doReturn(now - (numberOfReaders - gen + 1) * 1000).when(writer).getLastModifiedTime(); diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index f2401505cbaad..c99fee9dcb8a7 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -113,6 +113,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.LongConsumer; import java.util.function.LongSupplier; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -147,6 +148,7 @@ public class TranslogTests extends ESTestCase { protected Path translogDir; // A default primary term is used by translog instances created in this test. 
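From here on, every Translog and TranslogWriter construction in the diff gains a trailing LongConsumer argument; tests that do not care about durability simply pass the no-op seqNo -> {}. As a rough sketch of the contract (SketchWriter is a hypothetical stand-in, not the real TranslogWriter): the writer remembers which sequence numbers it has written, and reports each one to the callback only once a sync has made it durable.

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.LongConsumer;

// Illustrative stand-in for the persistedSequenceNumberConsumer plumbing:
// add() records a seqNo as written, sync() (standing in for an fsync)
// reports it as persisted.
final class SketchWriter {
    private final LongConsumer persistedSeqNoConsumer;
    private final Set<Long> writtenButNotSynced = ConcurrentHashMap.newKeySet();

    SketchWriter(LongConsumer persistedSeqNoConsumer) {
        this.persistedSeqNoConsumer = persistedSeqNoConsumer;
    }

    void add(long seqNo) {
        writtenButNotSynced.add(seqNo); // written, but not yet durable
    }

    void sync() {
        for (long seqNo : writtenButNotSynced) {
            persistedSeqNoConsumer.accept(seqNo); // durable from here on
        }
        writtenButNotSynced.clear();
    }
}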
private final AtomicLong primaryTerm = new AtomicLong(); + private final AtomicReference<LongConsumer> persistedSeqNoConsumer = new AtomicReference<>(); @Override protected void afterIfSuccessful() throws Exception { @@ -165,16 +167,25 @@ protected void afterIfSuccessful() throws Exception { } + private LongConsumer getPersistedSeqNoConsumer() { + return seqNo -> { + final LongConsumer consumer = persistedSeqNoConsumer.get(); + if (consumer != null) { + consumer.accept(seqNo); + } + }; + } + protected Translog createTranslog(TranslogConfig config) throws IOException { String translogUUID = Translog.createEmptyTranslog(config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); return new Translog(config, translogUUID, createTranslogDeletionPolicy(config.getIndexSettings()), - () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, getPersistedSeqNoConsumer()); } protected Translog openTranslog(TranslogConfig config, String translogUUID) throws IOException { return new Translog(config, translogUUID, createTranslogDeletionPolicy(config.getIndexSettings()), - () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, getPersistedSeqNoConsumer()); } @@ -226,7 +237,8 @@ private Translog create(Path path) throws IOException { final TranslogConfig translogConfig = getTranslogConfig(path); final TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(translogConfig.getIndexSettings()); final String translogUUID = Translog.createEmptyTranslog(path, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); - return new Translog(translogConfig, translogUUID, deletionPolicy, () -> globalCheckpoint.get(), primaryTerm::get); + return new Translog(translogConfig, translogUUID, deletionPolicy, () -> globalCheckpoint.get(), primaryTerm::get, + getPersistedSeqNoConsumer()); } private TranslogConfig getTranslogConfig(final Path path) { @@ -982,7 +994,7 @@ public void doRun() throws BrokenBarrierException, InterruptedException, IOExcep throw new AssertionError("unsupported operation type [" + type + "]"); } Translog.Location location = translog.add(op); - tracker.markSeqNoAsCompleted(id); + tracker.markSeqNoAsProcessed(id); Translog.Location existing = writtenOps.put(op, location); if (existing != null) { fail("duplicate op [" + op + "], old entry at " + location); @@ -994,7 +1006,7 @@ public void doRun() throws BrokenBarrierException, InterruptedException, IOExcep synchronized (flushMutex) { // we need not do this concurrently as we need to make sure that the generation // we're committing - is still present when we're committing - long localCheckpoint = tracker.getCheckpoint(); + long localCheckpoint = tracker.getProcessedCheckpoint(); translog.rollGeneration(); // expose the new checkpoint (simulating a commit), before we trim the translog lastCommittedLocalCheckpoint.set(localCheckpoint); @@ -1279,6 +1291,8 @@ public void testBasicCheckpoint() throws IOException { public void testTranslogWriter() throws IOException { final TranslogWriter writer = translog.createWriter(translog.currentFileGeneration() + 1); + final Set<Long> persistedSeqNos = new HashSet<>(); + persistedSeqNoConsumer.set(persistedSeqNos::add); final int numOps = randomIntBetween(8, 128); byte[] bytes = new byte[4]; ByteArrayDataOutput out = new ByteArrayDataOutput(bytes); @@ -1297,7 +1311,10 @@ public void testTranslogWriter() throws IOException { } writer.add(new BytesArray(bytes), seqNo); }
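TranslogTests routes the callback through an AtomicReference so each test can install its own consumer after the translog has already been constructed. The assertions that follow pin down the sync boundary: nothing is reported while operations are merely added, and after writer.sync() the reported set matches the seqNos that were written (the real test also drops UNASSIGNED_SEQ_NO, which operations without a sequence number report). A usage sketch against the hypothetical SketchWriter above:

import java.util.HashSet;
import java.util.Set;

// Nothing reaches the consumer until sync() is called.
final class SketchWriterDemo {
    public static void main(String[] args) {
        Set<Long> persistedSeqNos = new HashSet<>();
        SketchWriter writer = new SketchWriter(persistedSeqNos::add);
        writer.add(0L);
        writer.add(2L); // gaps are fine: persistence is reported per seqNo
        assert persistedSeqNos.isEmpty() : "nothing persisted before sync";
        writer.sync();
        assert persistedSeqNos.equals(Set.of(0L, 2L));
    }
}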
+ assertThat(persistedSeqNos, empty()); writer.sync(); + persistedSeqNos.remove(SequenceNumbers.UNASSIGNED_SEQ_NO); + assertEquals(seenSeqNos, persistedSeqNos); final BaseTranslogReader reader = randomBoolean() ? writer : translog.openReader(writer.path(), Checkpoint.read(translog.location().resolve(Translog.CHECKPOINT_FILE_NAME))); @@ -1401,7 +1418,7 @@ public void testBasicRecovery() throws IOException { } } else { translog = new Translog(config, translogGeneration.translogUUID, translog.getDeletionPolicy(), - () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}); assertEquals("lastCommitted must be 1 less than current", translogGeneration.translogFileGeneration + 1, translog.currentFileGeneration()); assertFalse(translog.syncNeeded()); @@ -1443,7 +1460,7 @@ public void testRecoveryUncommitted() throws IOException { final String translogUUID = translog.getTranslogUUID(); final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); try (Translog translog = new Translog(config, translogUUID, deletionPolicy, - () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {})) { assertNotNull(translogGeneration); assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration()); @@ -1459,7 +1476,7 @@ public void testRecoveryUncommitted() throws IOException { } if (randomBoolean()) { // recover twice try (Translog translog = new Translog(config, translogUUID, deletionPolicy, - () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {})) { assertNotNull(translogGeneration); assertEquals("lastCommitted must be 3 less than current - we never finished the commit and run recovery twice", translogGeneration.translogFileGeneration + 3, translog.currentFileGeneration()); @@ -1508,7 +1525,7 @@ public void testRecoveryUncommittedFileExists() throws IOException { final String translogUUID = translog.getTranslogUUID(); final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); try (Translog translog = new Translog(config, translogUUID, deletionPolicy, - () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {})) { assertNotNull(translogGeneration); assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration()); @@ -1525,7 +1542,7 @@ public void testRecoveryUncommittedFileExists() throws IOException { if (randomBoolean()) { // recover twice try (Translog translog = new Translog(config, translogUUID, deletionPolicy, - () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {})) { assertNotNull(translogGeneration); assertEquals("lastCommitted must be 3 less than current - we never finished the commit and run recovery twice", translogGeneration.translogFileGeneration + 3, translog.currentFileGeneration()); @@ -1573,7 +1590,7 @@ public void testRecoveryUncommittedCorruptedCheckpoint() throws IOException { final String translogUUID = translog.getTranslogUUID(); final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); try (Translog ignored = new Translog(config, translogUUID, deletionPolicy, 
- () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {})) { fail("corrupted"); } catch (IllegalStateException ex) { assertEquals("Checkpoint file translog-3.ckp already exists but has corrupted content expected: Checkpoint{offset=3025, " + @@ -1584,7 +1601,7 @@ public void testRecoveryUncommittedCorruptedCheckpoint() throws IOException { Checkpoint.write(FileChannel::open, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)), read, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING); try (Translog translog = new Translog(config, translogUUID, deletionPolicy, - () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {})) { assertNotNull(translogGeneration); assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration()); @@ -1853,12 +1870,14 @@ public void testOpenForeignTranslog() throws IOException { final String foreignTranslog = randomRealisticUnicodeOfCodepointLengthBetween(1, translogGeneration.translogUUID.length()); try { - new Translog(config, foreignTranslog, createTranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); + new Translog(config, foreignTranslog, createTranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, + seqNo -> {}); fail("translog doesn't belong to this UUID"); } catch (TranslogCorruptedException ex) { } - this.translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); + this.translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, + seqNo -> {}); try (Translog.Snapshot snapshot = this.translog.newSnapshotFromGen(translogGeneration, Long.MAX_VALUE)) { for (int i = firstUncommitted; i < translogOperations; i++) { Translog.Operation next = snapshot.next(); @@ -2052,7 +2071,7 @@ public void testFailFlush() throws IOException { final String translogUUID = translog.getTranslogUUID(); final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, - () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {})) { assertEquals("lastCommitted must be 1 less than current", translogGeneration.translogFileGeneration + 1, tlog.currentFileGeneration()); assertFalse(tlog.syncNeeded()); @@ -2191,7 +2210,7 @@ protected void afterAdd() throws IOException { writtenOperations.removeIf(next -> checkpoint.offset < (next.location.translogLocation + next.location.size)); try (Translog tlog = new Translog(config, translogUUID, createTranslogDeletionPolicy(), - () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}); Translog.Snapshot snapshot = tlog.newSnapshot()) { if (writtenOperations.size() != snapshot.totalOperations()) { for (int i = 0; i < threadCount; i++) { @@ -2241,7 +2260,7 @@ public void testRecoveryFromAFutureGenerationCleansUp() throws IOException { deletionPolicy.setTranslogGenerationOfLastCommit(randomLongBetween(comittedGeneration, Long.MAX_VALUE)); deletionPolicy.setMinTranslogGenerationForRecovery(comittedGeneration); translog = new Translog(config, 
translog.getTranslogUUID(), deletionPolicy, - () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}); assertThat(translog.getMinFileGeneration(), equalTo(1L)); // no trimming done yet, just recovered for (long gen = 1; gen < translog.currentFileGeneration(); gen++) { @@ -2300,7 +2319,7 @@ public void testRecoveryFromFailureOnTrimming() throws IOException { deletionPolicy.setTranslogGenerationOfLastCommit(randomLongBetween(comittedGeneration, Long.MAX_VALUE)); deletionPolicy.setMinTranslogGenerationForRecovery(comittedGeneration); try (Translog translog = new Translog(config, translogUUID, deletionPolicy, - () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {})) { // we don't know when things broke exactly assertThat(translog.getMinFileGeneration(), greaterThanOrEqualTo(1L)); assertThat(translog.getMinFileGeneration(), lessThanOrEqualTo(comittedGeneration)); @@ -2382,7 +2401,8 @@ private Translog getFailableTranslog(final FailSwitch fail, final TranslogConfig translogUUID = Translog.createEmptyTranslog( config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, channelFactory, primaryTerm.get()); } - return new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get) { + return new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, + seqNo -> {}) { @Override ChannelFactory getChannelFactory() { return channelFactory; @@ -2496,9 +2516,10 @@ public void testFailWhileCreateWriteWithRecoveredTLogs() throws IOException { translog.close(); try { new Translog(config, translog.getTranslogUUID(), createTranslogDeletionPolicy(), - () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get) { + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}) { @Override - protected TranslogWriter createWriter(long fileGeneration, long initialMinTranslogGen, long initialGlobalCheckpoint) + protected TranslogWriter createWriter(long fileGeneration, long initialMinTranslogGen, long initialGlobalCheckpoint, + LongConsumer persistedSequenceNumberConsumer) throws IOException { throw new MockDirectoryWrapper.FakeIOException(); } @@ -2559,7 +2580,7 @@ public void testRecoverWithUnbackedNextGenInIllegalState() throws IOException { Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 1) + ".tlog")); TranslogException ex = expectThrows(TranslogException.class, () -> new Translog(config, translog.getTranslogUUID(), - translog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)); + translog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {})); assertEquals(ex.getMessage(), "failed to create new translog file"); assertEquals(ex.getCause().getClass(), FileAlreadyExistsException.class); } @@ -2579,7 +2600,7 @@ public void testRecoverWithUnbackedNextGenAndFutureFile() throws IOException { // we add N+1 and N+2 to ensure we only delete the N+1 file and never jump ahead and wipe without the right condition Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 2) + ".tlog")); try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, - () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {})) { assertFalse(tlog.syncNeeded()); try 
(Translog.Snapshot snapshot = tlog.newSnapshot()) { for (int i = 0; i < 1; i++) { @@ -2593,7 +2614,8 @@ public void testRecoverWithUnbackedNextGenAndFutureFile() throws IOException { } TranslogException ex = expectThrows(TranslogException.class, - () -> new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)); + () -> new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, + seqNo -> {})); assertEquals(ex.getMessage(), "failed to create new translog file"); assertEquals(ex.getCause().getClass(), FileAlreadyExistsException.class); } @@ -2706,7 +2728,7 @@ public void testWithRandomException() throws IOException { SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); } try (Translog translog = new Translog(config, generationUUID, deletionPolicy, - () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}); Translog.Snapshot snapshot = translog.newSnapshotFromGen( new Translog.TranslogGeneration(generationUUID, minGenForRecovery), Long.MAX_VALUE)) { assertEquals(syncedDocs.size(), snapshot.totalOperations()); @@ -2773,14 +2795,16 @@ public void testPendingDelete() throws IOException { final String translogUUID = translog.getTranslogUUID(); final TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(config.getIndexSettings()); translog.close(); - translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); + translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, + seqNo -> {}); translog.add(new Translog.Index("test", "2", 1, primaryTerm.get(), new byte[]{2})); translog.rollGeneration(); Closeable lock = translog.acquireRetentionLock(); translog.add(new Translog.Index("test", "3", 2, primaryTerm.get(), new byte[]{3})); translog.close(); IOUtils.close(lock); - translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); + translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, + seqNo -> {}); } public static Translog.Location randomTranslogLocation() { @@ -3101,7 +3125,7 @@ public void testTranslogCloseInvariant() throws IOException { class MisbehavingTranslog extends Translog { MisbehavingTranslog(TranslogConfig config, String translogUUID, TranslogDeletionPolicy deletionPolicy, LongSupplier globalCheckpointSupplier, LongSupplier primaryTermSupplier) throws IOException { - super(config, translogUUID, deletionPolicy, globalCheckpointSupplier, primaryTermSupplier); + super(config, translogUUID, deletionPolicy, globalCheckpointSupplier, primaryTermSupplier, seqNo -> {}); } void callCloseDirectly() throws IOException { @@ -3223,7 +3247,7 @@ public void copy(Path source, Path target, CopyOption... 
options) throws IOExcep assertFalse(brokenTranslog.isOpen()); try (Translog recoveredTranslog = new Translog(getTranslogConfig(path), brokenTranslog.getTranslogUUID(), - brokenTranslog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { + brokenTranslog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {})) { recoveredTranslog.rollGeneration(); assertFilePresences(recoveredTranslog); } diff --git a/server/src/test/java/org/elasticsearch/indexing/IndexActionIT.java b/server/src/test/java/org/elasticsearch/indexing/IndexActionIT.java index 90814ed934475..3194f81303767 100644 --- a/server/src/test/java/org/elasticsearch/indexing/IndexActionIT.java +++ b/server/src/test/java/org/elasticsearch/indexing/IndexActionIT.java @@ -29,7 +29,6 @@ import org.elasticsearch.indices.InvalidIndexNameException; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; -import org.elasticsearch.test.junit.annotations.TestLogging; import java.util.ArrayList; import java.util.List; @@ -50,7 +49,6 @@ public class IndexActionIT extends ESIntegTestCase { * while the index is being created. */ - @TestLogging("_root:DEBUG,org.elasticsearch.index.shard.IndexShard:TRACE,org.elasticsearch.action.search:TRACE") public void testAutoGenerateIdNoDuplicates() throws Exception { int numberOfIterations = scaledRandomIntBetween(10, 50); for (int i = 0; i < numberOfIterations; i++) { diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java b/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java index 7995c5f5052b7..68653223cf554 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java @@ -34,7 +34,6 @@ import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; -import org.elasticsearch.test.junit.annotations.TestLogging; import java.time.ZoneId; import java.time.ZoneOffset; @@ -51,7 +50,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; -@TestLogging(value = "org.elasticsearch.indices.IndicesRequestCache:TRACE,org.elasticsearch.index.engine.Engine:DEBUG") public class IndicesRequestCacheIT extends ESIntegTestCase { // One of the primary purposes of the query cache is to cache aggs results diff --git a/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java b/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java index 6fe1f7b5591e8..3710988772af3 100644 --- a/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java +++ b/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java @@ -18,16 +18,6 @@ */ package org.elasticsearch.indices.flush; -import java.io.IOException; -import java.util.Arrays; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.stream.Collectors; - import org.apache.lucene.index.Term; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.flush.FlushRequest; @@ -49,6 +39,7 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import 
org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.InternalEngine; import org.elasticsearch.index.engine.InternalEngineTests; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.Uid; @@ -58,7 +49,16 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.junit.annotations.TestLogging; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.containsString; @@ -275,12 +275,12 @@ public void testUnallocatedShardsDoesNotHang() throws InterruptedException { private void indexDoc(Engine engine, String id) throws IOException { final ParsedDocument doc = InternalEngineTests.createParsedDoc(id, null); final Engine.IndexResult indexResult = engine.index(new Engine.Index(new Term("_id", Uid.encodeId(doc.id())), doc, - engine.getLocalCheckpoint() + 1, 1L, 1L, null, Engine.Operation.Origin.REPLICA, System.nanoTime(), -1L, false, - SequenceNumbers.UNASSIGNED_SEQ_NO, 0)); + ((InternalEngine) engine).getProcessedLocalCheckpoint() + 1, 1L, 1L, null, Engine.Operation.Origin.REPLICA, System.nanoTime(), + -1L, false, SequenceNumbers.UNASSIGNED_SEQ_NO, 0)); assertThat(indexResult.getFailure(), nullValue()); + engine.syncTranslog(); } - @TestLogging("_root:TRACE") public void testSyncedFlushSkipOutOfSyncReplicas() throws Exception { internalCluster().ensureAtLeastNumDataNodes(between(2, 3)); final int numberOfReplicas = internalCluster().numDataNodes() - 1; diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java b/server/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java index 0c056da34a56d..b020c3d6b88dc 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java @@ -31,21 +31,17 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; -import org.elasticsearch.test.junit.annotations.TestLogging; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import static org.hamcrest.Matchers.equalTo; -@TestLogging("_root:DEBUG") @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) public class IndexPrimaryRelocationIT extends ESIntegTestCase { private static final int RELOCATION_COUNT = 15; - @TestLogging("_root:DEBUG,org.elasticsearch.action.bulk:TRACE,org.elasticsearch.index.shard:TRACE," + - "org.elasticsearch.cluster.service:TRACE") public void testPrimaryRelocationWhileIndexing() throws Exception { internalCluster().ensureAtLeastNumDataNodes(randomIntBetween(2, 3)); client().admin().indices().prepareCreate("test") diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index aca82d438976d..18aef3720c31a 100644 --- 
a/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -832,7 +832,6 @@ public void sendRequest(Transport.Connection connection, long requestId, String } } - @TestLogging("org.elasticsearch.indices.recovery:TRACE") public void testHistoryRetention() throws Exception { internalCluster().startNodes(3); diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java index 1dc2ba058b75e..c3f6a3aae89fb 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java @@ -143,6 +143,7 @@ public void testRecoveryWithOutOfOrderDeleteWithTranslog() throws Exception { // index #2 orgReplica.applyIndexOperationOnReplica(2, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, new SourceToParse(indexName, "type", "id-2", new BytesArray("{}"), XContentType.JSON)); + orgReplica.sync(); // advance local checkpoint orgReplica.updateGlobalCheckpointOnReplica(3L, "test"); // index #5 -> force NoOp #4. orgReplica.applyIndexOperationOnReplica(5, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, @@ -207,6 +208,7 @@ public void testRecoveryWithOutOfOrderDeleteWithSoftDeletes() throws Exception { // index #2 orgReplica.applyIndexOperationOnReplica(2, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, new SourceToParse(indexName, "type", "id-2", new BytesArray("{}"), XContentType.JSON)); + orgReplica.sync(); // advance local checkpoint orgReplica.updateGlobalCheckpointOnReplica(3L, "test"); // index #5 -> force NoOp #4. orgReplica.applyIndexOperationOnReplica(5, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, @@ -330,11 +332,11 @@ public void testPeerRecoverySendSafeCommitInFileBased() throws Exception { @Override public void prepareForTranslogOperations(boolean fileBasedRecovery, int totalTranslogOps, ActionListener listener) { super.prepareForTranslogOperations(fileBasedRecovery, totalTranslogOps, listener); - assertThat(replicaShard.getGlobalCheckpoint(), equalTo(primaryShard.getGlobalCheckpoint())); + assertThat(replicaShard.getLastKnownGlobalCheckpoint(), equalTo(primaryShard.getLastKnownGlobalCheckpoint())); } @Override public void cleanFiles(int totalTranslogOps, long globalCheckpoint, Store.MetadataSnapshot sourceMetaData) throws IOException { - assertThat(globalCheckpoint, equalTo(primaryShard.getGlobalCheckpoint())); + assertThat(globalCheckpoint, equalTo(primaryShard.getLastKnownGlobalCheckpoint())); super.cleanFiles(totalTranslogOps, globalCheckpoint, sourceMetaData); } }, true, true); diff --git a/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java b/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java index b39a008de5f4f..2701bfc104c71 100644 --- a/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java +++ b/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java @@ -74,10 +74,9 @@ public class CloseIndexIT extends ESIntegTestCase { @Override public Settings indexSettings() { - Settings.builder().put(super.indexSettings()) + return Settings.builder().put(super.indexSettings()) .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), - new ByteSizeValue(randomIntBetween(1, 4096), ByteSizeUnit.KB)); - return super.indexSettings(); + new ByteSizeValue(randomIntBetween(1, 4096), 
ByteSizeUnit.KB)).build(); } public void testCloseMissingIndex() { diff --git a/server/src/test/java/org/elasticsearch/indices/state/CloseWhileRelocatingShardsIT.java b/server/src/test/java/org/elasticsearch/indices/state/CloseWhileRelocatingShardsIT.java index 71db668c29f26..2125184baef63 100644 --- a/server/src/test/java/org/elasticsearch/indices/state/CloseWhileRelocatingShardsIT.java +++ b/server/src/test/java/org/elasticsearch/indices/state/CloseWhileRelocatingShardsIT.java @@ -40,7 +40,6 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.BackgroundIndexer; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.test.transport.StubbableTransport; import org.elasticsearch.transport.TransportService; @@ -87,8 +86,6 @@ protected int maximumNumberOfShards() { return 3; } - @TestLogging("_root_:DEBUG,org.elasticsearch.indices.recovery:TRACE,org.elasticsearch.action.admin.indices.close:TRACE," - + "org.elasticsearch.cluster.metadata.MetaDataIndexStateService:DEBUG") public void testCloseWhileRelocatingShards() throws Exception { final String[] indices = new String[randomIntBetween(3, 5)]; final Map docsPerIndex = new HashMap<>(); diff --git a/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java index 59e7c21a3e6e8..95c9b1adf6ada 100644 --- a/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java +++ b/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java @@ -1204,7 +1204,7 @@ private void persistGlobalCheckpoint(String index) throws Exception { for (IndexService indexService : indexServices) { for (IndexShard indexShard : indexService) { indexShard.sync(); - assertThat(indexShard.getLastSyncedGlobalCheckpoint(), equalTo(indexShard.getGlobalCheckpoint())); + assertThat(indexShard.getLastSyncedGlobalCheckpoint(), equalTo(indexShard.getLastKnownGlobalCheckpoint())); } } } diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorFullRestartIT.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorFullRestartIT.java index 63ff7f9f97463..d7ee34a860bd7 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorFullRestartIT.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorFullRestartIT.java @@ -25,7 +25,6 @@ import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.junit.annotations.TestLogging; import java.util.ArrayList; import java.util.Collection; @@ -48,7 +47,6 @@ protected boolean ignoreExternalCluster() { return true; } - @TestLogging("org.elasticsearch.persistent:TRACE,org.elasticsearch.cluster.service:DEBUG") public void testFullClusterRestart() throws Exception { PersistentTasksService service = internalCluster().getInstance(PersistentTasksService.class); int numberOfTasks = randomIntBetween(1, 10); diff --git a/server/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java b/server/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java index 7002a77b6ba10..9e5a170b6b334 100644 --- a/server/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java +++ 
b/server/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java @@ -42,7 +42,6 @@ import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.BackgroundIndexer; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.junit.annotations.TestLogging; import java.util.Arrays; import java.util.Collection; @@ -61,8 +60,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; -@TestLogging("_root:DEBUG,org.elasticsearch.index.shard:TRACE,org.elasticsearch.cluster.service:TRACE," + - "org.elasticsearch.index.seqno:TRACE,org.elasticsearch.indices.recovery:TRACE") public class RecoveryWhileUnderLoadIT extends ESIntegTestCase { private final Logger logger = LogManager.getLogger(RecoveryWhileUnderLoadIT.class); diff --git a/server/src/test/java/org/elasticsearch/recovery/RelocationIT.java b/server/src/test/java/org/elasticsearch/recovery/RelocationIT.java index c2d35279bdff4..369daef08d1c3 100644 --- a/server/src/test/java/org/elasticsearch/recovery/RelocationIT.java +++ b/server/src/test/java/org/elasticsearch/recovery/RelocationIT.java @@ -60,7 +60,6 @@ import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.MockIndexEventListener; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.test.transport.StubbableTransport; import org.elasticsearch.transport.Transport; @@ -94,7 +93,6 @@ import static org.hamcrest.Matchers.startsWith; @ClusterScope(scope = Scope.TEST, numDataNodes = 0) -@TestLogging("_root:DEBUG,org.elasticsearch.indices.recovery:TRACE,org.elasticsearch.index.shard.service:TRACE") public class RelocationIT extends ESIntegTestCase { private final TimeValue ACCEPTABLE_RELOCATION_TIME = new TimeValue(5, TimeUnit.MINUTES); @@ -162,7 +160,6 @@ public void testSimpleRelocationNoIndexing() { assertThat(client().prepareSearch("test").setSize(0).execute().actionGet().getHits().getTotalHits().value, equalTo(20L)); } - @TestLogging("org.elasticsearch.action.bulk:TRACE,org.elasticsearch.action.search:TRACE") public void testRelocationWhileIndexingRandom() throws Exception { int numberOfRelocations = scaledRandomIntBetween(1, rarely() ? 10 : 4); int numberOfReplicas = randomBoolean() ? 0 : 1; @@ -266,7 +263,6 @@ public void testRelocationWhileIndexingRandom() throws Exception { } } - @TestLogging("org.elasticsearch.action.bulk:TRACE,org.elasticsearch.action.search:TRACE") public void testRelocationWhileRefreshing() throws Exception { int numberOfRelocations = scaledRandomIntBetween(1, rarely() ? 10 : 4); int numberOfReplicas = randomBoolean() ? 
0 : 1; @@ -450,11 +446,6 @@ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IO } } - @TestLogging( - "org.elasticsearch.action.bulk:TRACE," - + "org.elasticsearch.action.search:TRACE," - + "org.elasticsearch.cluster.service:TRACE," - + "org.elasticsearch.index.seqno:TRACE") public void testIndexAndRelocateConcurrently() throws ExecutionException, InterruptedException { int halfNodes = randomIntBetween(1, 3); Settings[] nodeSettings = Stream.concat( diff --git a/server/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java b/server/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java index 973c687ebe84c..3000d7262db77 100644 --- a/server/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java +++ b/server/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java @@ -108,6 +108,7 @@ public void testCancelRecoveryAndResume() throws Exception { ensureGreen(); // ensure we have flushed segments and make them a big one via optimize client().admin().indices().prepareFlush().setForce(true).get(); + client().admin().indices().prepareFlush().setForce(true).get(); // double flush to create safe commit in case of async durability client().admin().indices().prepareForceMerge().setMaxNumSegments(1).setFlush(true).get(); final CountDownLatch latch = new CountDownLatch(1); @@ -119,7 +120,7 @@ public void testCancelRecoveryAndResume() throws Exception { (connection, requestId, action, request, options) -> { if (action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) { RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request; - logger.debug("file chunk [{}] lastChunk: {}", req, req.lastChunk()); + logger.info("file chunk [{}] lastChunk: {}", req, req.lastChunk()); if ((req.name().endsWith("cfs") || req.name().endsWith("fdt")) && req.lastChunk() && truncate.get()) { latch.countDown(); throw new RuntimeException("Caused some truncated files for fun and profit"); diff --git a/server/src/test/java/org/elasticsearch/rest/action/cat/RestIndicesActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/cat/RestIndicesActionTests.java index ae387806796fc..b9a42b5cc1c7b 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/cat/RestIndicesActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/cat/RestIndicesActionTests.java @@ -151,7 +151,7 @@ public void testBuildTable() { } } - private IndicesStatsResponse randomIndicesStatsResponse(final IndexMetaData[] indices) { + public static IndicesStatsResponse randomIndicesStatsResponse(final IndexMetaData[] indices) { List shardStats = new ArrayList<>(); for (final IndexMetaData index : indices) { int numShards = randomIntBetween(1, 3); diff --git a/server/src/test/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java b/server/src/test/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java index b304bf2d79779..494a8139217c7 100644 --- a/server/src/test/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java +++ b/server/src/test/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java @@ -25,7 +25,6 @@ import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.junit.annotations.TestLogging; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -34,7 +33,6 @@ * This test basically 
verifies that search with a single shard active (cause we indexed to it) and other * shards possibly not active at all (cause they haven't allocated) will still work. */ -@TestLogging("_root:DEBUG") public class SearchWhileCreatingIndexIT extends ESIntegTestCase { public void testIndexCausesIndexCreation() throws Exception { searchWhileCreatingIndex(false, 1); // 1 replica in our default... diff --git a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 217c59e2b3169..00f416e5ce52a 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -95,7 +95,6 @@ import org.elasticsearch.script.MockScriptEngine; import org.elasticsearch.script.StoredScriptsIT; import org.elasticsearch.snapshots.mockstore.MockRepository; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; @@ -1939,8 +1938,6 @@ public void testDeleteRepositoryWhileSnapshotting() throws Exception { assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits().value, equalTo(100L)); } - @TestLogging("_root:DEBUG") // this fails every now and then: https://github.com/elastic/elasticsearch/issues/18121 but without - // more logs we cannot find out why public void testReadonlyRepository() throws Exception { Client client = client(); logger.info("--> creating repository"); @@ -3691,6 +3688,7 @@ public void testSnapshottingWithMissingSequenceNumbers() { for (int i = 10; i < 15; i++) { index(indexName, "_doc", Integer.toString(i), "foo", "bar" + i); } + client().admin().indices().prepareFlush(indexName).setForce(true).setWaitIfOngoing(true).get(); stats = client().admin().indices().prepareStats(indexName).clear().get(); shardStats = stats.getShards()[0]; @@ -3821,7 +3819,6 @@ public void testParallelRestoreOperationsFromSingleSnapshot() throws Exception { assertThat(client.prepareGet(restoredIndexName2, typeName, sameSourceIndex ? 
docId : docId2).get().isExists(), equalTo(true)); } - @TestLogging("org.elasticsearch.snapshots:TRACE") public void testAbortedSnapshotDuringInitDoesNotStart() throws Exception { final Client client = client(); diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java index 5f6d31dce3e97..e93755873c917 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java @@ -58,7 +58,6 @@ import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.test.transport.StubbableConnectionManager; import org.elasticsearch.test.transport.StubbableTransport; @@ -678,7 +677,6 @@ public void run() { } } - @TestLogging("_root:DEBUG, org.elasticsearch.transport:TRACE") public void testCloseWhileConcurrentlyConnecting() throws IOException, InterruptedException, BrokenBarrierException { List knownNodes = new CopyOnWriteArrayList<>(); try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT); diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java index 9b0f687fc0738..702e1f09fb8cf 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java @@ -41,6 +41,7 @@ import java.io.IOException; import java.net.InetAddress; import java.net.InetSocketAddress; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; @@ -840,6 +841,81 @@ public void testGetNodePredicatesCombination() { } } + public void testReconnectWhenSeedsNodesAreUpdated() throws Exception { + List knownNodes = new CopyOnWriteArrayList<>(); + try (MockTransportService cluster_node_0 = startTransport("cluster_node_0", knownNodes, Version.CURRENT); + MockTransportService cluster_node_1 = startTransport("cluster_node_1", knownNodes, Version.CURRENT)) { + + final DiscoveryNode node0 = cluster_node_0.getLocalDiscoNode(); + final DiscoveryNode node1 = cluster_node_1.getLocalDiscoNode(); + knownNodes.add(node0); + knownNodes.add(node1); + Collections.shuffle(knownNodes, random()); + + try (MockTransportService transportService = + MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + transportService.start(); + transportService.acceptIncomingRequests(); + + final Settings.Builder builder = Settings.builder(); + builder.putList("cluster.remote.cluster_test.seeds", Collections.singletonList(node0.getAddress().toString())); + try (RemoteClusterService service = new RemoteClusterService(builder.build(), transportService)) { + assertFalse(service.isCrossClusterSearchEnabled()); + service.initializeRemoteClusters(); + assertTrue(service.isCrossClusterSearchEnabled()); + + final RemoteClusterConnection firstRemoteClusterConnection = service.getRemoteClusterConnection("cluster_test"); + assertTrue(firstRemoteClusterConnection.isNodeConnected(node0)); + assertTrue(firstRemoteClusterConnection.isNodeConnected(node1)); + 
assertEquals(2, firstRemoteClusterConnection.getNumNodesConnected()); + assertFalse(firstRemoteClusterConnection.isClosed()); + + final CountDownLatch firstLatch = new CountDownLatch(1); + service.updateRemoteCluster( + "cluster_test", + Collections.singletonList(node0.getAddress().toString()), null, + genericProfile("cluster_test"), connectionListener(firstLatch)); + firstLatch.await(); + + assertTrue(service.isCrossClusterSearchEnabled()); + assertTrue(firstRemoteClusterConnection.isNodeConnected(node0)); + assertTrue(firstRemoteClusterConnection.isNodeConnected(node1)); + assertEquals(2, firstRemoteClusterConnection.getNumNodesConnected()); + assertFalse(firstRemoteClusterConnection.isClosed()); + assertSame(firstRemoteClusterConnection, service.getRemoteClusterConnection("cluster_test")); + + final List newSeeds = new ArrayList<>(); + newSeeds.add(node1.getAddress().toString()); + if (randomBoolean()) { + newSeeds.add(node0.getAddress().toString()); + Collections.shuffle(newSeeds, random()); + } + + final CountDownLatch secondLatch = new CountDownLatch(1); + service.updateRemoteCluster( + "cluster_test", + newSeeds, null, + genericProfile("cluster_test"), connectionListener(secondLatch)); + secondLatch.await(); + + assertTrue(service.isCrossClusterSearchEnabled()); + assertBusy(() -> { + assertFalse(firstRemoteClusterConnection.isNodeConnected(node0)); + assertFalse(firstRemoteClusterConnection.isNodeConnected(node1)); + assertEquals(0, firstRemoteClusterConnection.getNumNodesConnected()); + assertTrue(firstRemoteClusterConnection.isClosed()); + }); + + final RemoteClusterConnection secondRemoteClusterConnection = service.getRemoteClusterConnection("cluster_test"); + assertTrue(secondRemoteClusterConnection.isNodeConnected(node0)); + assertTrue(secondRemoteClusterConnection.isNodeConnected(node1)); + assertEquals(2, secondRemoteClusterConnection.getNumNodesConnected()); + assertFalse(secondRemoteClusterConnection.isClosed()); + } + } + } + } + public void testRemoteClusterWithProxy() throws Exception { List knownNodes = new CopyOnWriteArrayList<>(); try (MockTransportService cluster_1_node0 = startTransport("cluster_1_node0", knownNodes, Version.CURRENT); diff --git a/server/src/test/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java b/server/src/test/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java index dbff3de16cbd5..62048119b6974 100644 --- a/server/src/test/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java +++ b/server/src/test/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java @@ -38,7 +38,6 @@ import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.disruption.ServiceDisruptionScheme; -import org.elasticsearch.test.junit.annotations.TestLogging; import java.io.FileInputStream; import java.io.IOException; @@ -119,10 +118,6 @@ * */ @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, minNumDataNodes = 4, maxNumDataNodes = 6) -@TestLogging("_root:DEBUG,org.elasticsearch.action.bulk:TRACE,org.elasticsearch.action.get:TRACE," + - "org.elasticsearch.discovery:TRACE,org.elasticsearch.action.support.replication:TRACE," + - "org.elasticsearch.cluster.service:TRACE,org.elasticsearch.indices.recovery:TRACE," + - "org.elasticsearch.indices.cluster:TRACE,org.elasticsearch.index.shard:TRACE") public class ConcurrentSeqNoVersioningIT extends AbstractDisruptionTestCase { private static final Pattern EXTRACT_VERSION = 
Pattern.compile("current document has seqNo \\[(\\d+)\\] and primary term \\[(\\d+)\\]"); diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index 02fc3ae9b45af..e68ae70921263 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -411,7 +411,7 @@ protected Translog createTranslog(Path translogPath, LongSupplier primaryTermSup String translogUUID = Translog.createEmptyTranslog(translogPath, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTermSupplier.getAsLong()); return new Translog(translogConfig, translogUUID, createTranslogDeletionPolicy(INDEX_SETTINGS), - () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTermSupplier); + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTermSupplier, seqNo -> {}); } protected TranslogHandler createTranslogHandler(IndexSettings indexSettings) { @@ -1147,7 +1147,7 @@ public static Translog getTranslog(Engine engine) { * @throws InterruptedException if the thread was interrupted while blocking on the condition */ public static void waitForOpsToComplete(InternalEngine engine, long seqNo) throws InterruptedException { - engine.getLocalCheckpointTracker().waitForOpsToComplete(seqNo); + engine.getLocalCheckpointTracker().waitForProcessedOpsToComplete(seqNo); } public static boolean hasSnapshottedCommits(Engine engine) { diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java b/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java index 2b597a64c371e..8e8b4687844b2 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java @@ -113,6 +113,7 @@ public int run(Engine engine, Translog.Snapshot snapshot) throws IOException { opsRecovered++; appliedOperations.incrementAndGet(); } + engine.syncTranslog(); return opsRecovered; } diff --git a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index 081318fd8132c..b91197bfe0851 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -552,7 +552,7 @@ public synchronized void removeRetentionLease(String id, ActionListener acquirePermitFuture = new PlainActionFuture<>(); - replica.acquireReplicaOperationPermit(getPrimary().getOperationPrimaryTerm(), getPrimary().getGlobalCheckpoint(), + replica.acquireReplicaOperationPermit(getPrimary().getOperationPrimaryTerm(), getPrimary().getLastKnownGlobalCheckpoint(), getPrimary().getMaxSeqNoOfUpdatesOrDeletes(), acquirePermitFuture, ThreadPool.Names.SAME, request); try (Releasable ignored = acquirePermitFuture.actionGet()) { replica.updateRetentionLeasesOnReplica(request.getRetentionLeases()); @@ -658,7 +658,12 @@ public long localCheckpoint() { @Override public long globalCheckpoint() { - return getPrimaryShard().getGlobalCheckpoint(); + return getPrimaryShard().getLastSyncedGlobalCheckpoint(); + } + + @Override + public long computedGlobalCheckpoint() { + return getPrimaryShard().getLastKnownGlobalCheckpoint(); } @Override @@ -692,7 
+697,8 @@ public void performOn( try { performOnReplica(request, replica); releasable.close(); - delegatedListener.onResponse(new ReplicaResponse(replica.getLocalCheckpoint(), replica.getGlobalCheckpoint())); + delegatedListener.onResponse(new ReplicaResponse(replica.getLocalCheckpoint(), + replica.getLastKnownGlobalCheckpoint())); } catch (final Exception e) { Releasables.closeWhileHandlingException(releasable); delegatedListener.onFailure(e); @@ -755,7 +761,7 @@ protected void performOnPrimary(IndexShard primary, BulkShardRequest request, Ac @Override protected void performOnReplica(BulkShardRequest request, IndexShard replica) throws Exception { executeShardBulkOnReplica(request, replica, getPrimaryShard().getPendingPrimaryTerm(), - getPrimaryShard().getGlobalCheckpoint(), getPrimaryShard().getMaxSeqNoOfUpdatesOrDeletes()); + getPrimaryShard().getLastKnownGlobalCheckpoint(), getPrimaryShard().getMaxSeqNoOfUpdatesOrDeletes()); } } @@ -826,7 +832,7 @@ void indexOnReplica(BulkShardRequest request, ReplicationGroup group, IndexShard void indexOnReplica(BulkShardRequest request, ReplicationGroup group, IndexShard replica, long term) throws Exception { executeShardBulkOnReplica(request, replica, term, - group.primary.getGlobalCheckpoint(), group.primary.getMaxSeqNoOfUpdatesOrDeletes()); + group.primary.getLastKnownGlobalCheckpoint(), group.primary.getMaxSeqNoOfUpdatesOrDeletes()); } /** @@ -834,7 +840,7 @@ void indexOnReplica(BulkShardRequest request, ReplicationGroup group, IndexShard */ void deleteOnReplica(BulkShardRequest request, ReplicationGroup group, IndexShard replica) throws Exception { executeShardBulkOnReplica(request, replica, group.primary.getPendingPrimaryTerm(), - group.primary.getGlobalCheckpoint(), group.primary.getMaxSeqNoOfUpdatesOrDeletes()); + group.primary.getLastKnownGlobalCheckpoint(), group.primary.getMaxSeqNoOfUpdatesOrDeletes()); } class GlobalCheckpointSync extends ReplicationAction< @@ -883,7 +889,7 @@ protected void performOnPrimary(IndexShard primary, ResyncReplicationRequest req @Override protected void performOnReplica(ResyncReplicationRequest request, IndexShard replica) throws Exception { executeResyncOnReplica(replica, request, getPrimaryShard().getPendingPrimaryTerm(), - getPrimaryShard().getGlobalCheckpoint(), getPrimaryShard().getMaxSeqNoOfUpdatesOrDeletes()); + getPrimaryShard().getLastKnownGlobalCheckpoint(), getPrimaryShard().getMaxSeqNoOfUpdatesOrDeletes()); } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index a14fbd1583f89..4b5be29205778 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -753,12 +753,14 @@ protected Engine.IndexResult indexDoc(IndexShard shard, String type, String id, result = shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, sourceToParse, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false); } + shard.sync(); // advance local checkpoint shard.updateLocalCheckpointForShard(shard.routingEntry().allocationId().getId(), shard.getLocalCheckpoint()); } else { final long seqNo = shard.seqNoStats().getMaxSeqNo() + 1; shard.advanceMaxSeqNoOfUpdatesOrDeletes(seqNo); // manually replicate max_seq_no_of_updates result = shard.applyIndexOperationOnReplica(seqNo, 0, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, 
false, sourceToParse); + shard.sync(); // advance local checkpoint if (result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) { throw new TransportReplicationAction.RetryOnReplicaException(shard.shardId, "Mappings are not available on the replica yet, triggered update: " + result.getRequiredMappingUpdate()); @@ -777,11 +779,14 @@ protected Engine.DeleteResult deleteDoc(IndexShard shard, String type, String id if (shard.routingEntry().primary()) { result = shard.applyDeleteOperationOnPrimary( Versions.MATCH_ANY, type, id, VersionType.INTERNAL, SequenceNumbers.UNASSIGNED_SEQ_NO, 0); - shard.updateLocalCheckpointForShard(shard.routingEntry().allocationId().getId(), shard.getEngine().getLocalCheckpoint()); + shard.sync(); // advance local checkpoint + shard.updateLocalCheckpointForShard(shard.routingEntry().allocationId().getId(), + shard.getLocalCheckpoint()); } else { final long seqNo = shard.seqNoStats().getMaxSeqNo() + 1; shard.advanceMaxSeqNoOfUpdatesOrDeletes(seqNo); // manually replicate max_seq_no_of_updates result = shard.applyDeleteOperationOnReplica(seqNo, 0L, type, id); + shard.sync(); // advance local checkpoint } return result; } diff --git a/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java b/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java index c2e97f35faee9..d46c09e12621d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java +++ b/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java @@ -27,8 +27,10 @@ import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.bulk.BulkShardRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.client.Client; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -141,7 +143,7 @@ public void run() { } } - BulkRequestBuilder bulkRequest = client.prepareBulk(); + BulkRequestBuilder bulkRequest = client.prepareBulk().setTimeout(timeout); for (int i = 0; i < batchSize; i++) { id = idGenerator.incrementAndGet(); if (useAutoGeneratedIDs) { @@ -151,16 +153,21 @@ public void run() { .setSource(generateSource(id, threadRandom))); } } - BulkResponse bulkResponse = bulkRequest.get(); - for (BulkItemResponse bulkItemResponse : bulkResponse) { - if (bulkItemResponse.isFailed() == false) { - boolean add = ids.add(bulkItemResponse.getId()); - assert add : "ID: " + bulkItemResponse.getId() + " already used"; - } else { - failures.add(bulkItemResponse.getFailure().getCause()); + try { + BulkResponse bulkResponse = bulkRequest.get(); + for (BulkItemResponse bulkItemResponse : bulkResponse) { + if (bulkItemResponse.isFailed() == false) { + boolean add = ids.add(bulkItemResponse.getId()); + assert add : "ID: " + bulkItemResponse.getId() + " already used"; + } else { + failures.add(bulkItemResponse.getFailure().getCause()); + } + } + } catch (Exception e) { + if (ignoreIndexingFailures == false) { + throw e; } } - } else { if (hasBudget.get() && !availableBudget.tryAcquire(250, TimeUnit.MILLISECONDS)) { @@ -169,15 +176,27 @@ public void run() { } id = idGenerator.incrementAndGet(); if (useAutoGeneratedIDs) { - IndexResponse indexResponse = client.prepareIndex(index, type) - 
.setSource(generateSource(id, threadRandom)).get(); - boolean add = ids.add(indexResponse.getId()); - assert add : "ID: " + indexResponse.getId() + " already used"; + try { + IndexResponse indexResponse = client.prepareIndex(index, type) + .setTimeout(timeout).setSource(generateSource(id, threadRandom)).get(); + boolean add = ids.add(indexResponse.getId()); + assert add : "ID: " + indexResponse.getId() + " already used"; + } catch (Exception e) { + if (ignoreIndexingFailures == false) { + throw e; + } + } } else { - IndexResponse indexResponse = client.prepareIndex(index, type, Long.toString(id)) - .setSource(generateSource(id, threadRandom)).get(); - boolean add = ids.add(indexResponse.getId()); - assert add : "ID: " + indexResponse.getId() + " already used"; + try { + IndexResponse indexResponse = client.prepareIndex(index, type, Long.toString(id)) + .setTimeout(timeout).setSource(generateSource(id, threadRandom)).get(); + boolean add = ids.add(indexResponse.getId()); + assert add : "ID: " + indexResponse.getId() + " already used"; + } catch (Exception e) { + if (ignoreIndexingFailures == false) { + throw e; + } + } } } } @@ -217,6 +236,18 @@ private XContentBuilder generateSource(long id, Random random) throws IOExceptio } + private volatile TimeValue timeout = BulkShardRequest.DEFAULT_TIMEOUT; + + public void setRequestTimeout(TimeValue timeout) { + this.timeout = timeout; + } + + private volatile boolean ignoreIndexingFailures; + + public void setIgnoreIndexingFailures(boolean ignoreIndexingFailures) { + this.ignoreIndexingFailures = ignoreIndexingFailures; + } + private void setBudget(int numOfDocs) { logger.debug("updating budget to [{}]", numOfDocs); if (numOfDocs >= 0) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 549683ab20ad7..470c5ec8c49d5 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -1731,6 +1731,8 @@ public synchronized void fullRestart(RestartCallback callback) throws Exception nodesByRoles.computeIfAbsent(discoveryNode.getRoles(), k -> new ArrayList<>()).add(nodeAndClient); } + callback.onAllNodesStopped(); + assert nodesByRoles.values().stream().mapToInt(List::size).sum() == nodeCount; // randomize start up order, but making sure that: @@ -2174,6 +2176,9 @@ public Settings onNodeStopped(String nodeName) throws Exception { return Settings.EMPTY; } + public void onAllNodesStopped() throws Exception { + } + /** * Executed for each node before the {@code n + 1} node is restarted. The given client is * an active client to the node that will be restarted next. 
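The onAllNodesStopped() hook added above runs exactly once during fullRestart, after every node has shut down and before any node comes back, which makes it the one safe point to touch on-disk state that no live node may be holding. A minimal usage sketch, assuming a test that can reach internalCluster(); the wipeClusterWideState() helper is hypothetical:

internalCluster().fullRestart(new InternalTestCluster.RestartCallback() {
    @Override
    public void onAllNodesStopped() throws Exception {
        // every node is down at this point, so nothing holds locks on shared on-disk state
        wipeClusterWideState(); // hypothetical helper
    }
});

Unlike onNodeStopped(String), which fires per node while the rest of the cluster keeps running, this callback sees the whole cluster quiesced.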
diff --git a/x-pack/plugin/build.gradle b/x-pack/plugin/build.gradle index b6b925c33fdbe..630b43ed71e58 100644 --- a/x-pack/plugin/build.gradle +++ b/x-pack/plugin/build.gradle @@ -4,6 +4,7 @@ import org.elasticsearch.gradle.test.NodeInfo import java.nio.charset.StandardCharsets +apply plugin: 'elasticsearch.testclusters' apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' @@ -25,14 +26,13 @@ subprojects { project.dependencies.add('featureAwarePlugin', project(':x-pack:test:feature-aware')) project.dependencies.add('featureAwarePlugin', project.sourceSets.main.output.getClassesDirs()) - final Task featureAwareTask = project.tasks.create("featureAwareCheck", LoggedExec) { + File successMarker = file("$buildDir/markers/featureAware") + task featureAwareCheck(type: LoggedExec) { description = "Runs FeatureAwareCheck on main classes." dependsOn project.configurations.featureAwarePlugin - - final File successMarker = new File(project.buildDir, 'markers/featureAware') outputs.file(successMarker) - executable = new File(project.runtimeJavaHome, 'bin/java') + executable = "${project.runtimeJavaHome}/bin/java" // default to main class files if such a source set exists final List files = [] @@ -53,7 +53,7 @@ subprojects { } } - project.precommit.dependsOn featureAwareTask + project.precommit.dependsOn featureAwareCheck } } } @@ -85,28 +85,8 @@ artifacts { testArtifacts testJar } -integTestRunner { - /* - * We have to disable setting the number of available processors as tests in the same JVM randomize processors and will step on each - * other if we allow them to set the number of available processors as it's set-once in Netty. - */ - systemProperty 'es.set.netty.runtime.available.processors', 'false' - - - // TODO: fix this rest test to not depend on a hardcoded port! - def blacklist = ['getting_started/10_monitor_cluster_health/*'] - boolean snapshot = "true".equals(System.getProperty("build.snapshot", "true")) - if (!snapshot) { - // these tests attempt to install basic/internal licenses signed against the dev/public.key - // Since there is no infrastructure in place (anytime soon) to generate licenses using the production - // private key, these tests are whitelisted in non-snapshot test runs - blacklist.addAll(['xpack/15_basic/*', 'license/20_put_license/*']) - } - systemProperty 'tests.rest.blacklist', blacklist.join(',') -} - // location for keys and certificates -File keystoreDir = new File(project.buildDir, 'keystore') +File keystoreDir = file("$buildDir/keystore") File nodeKey = file("$keystoreDir/testnode.pem") File nodeCert = file("$keystoreDir/testnode.crt") @@ -125,8 +105,29 @@ task copyKeyCerts(type: Copy) { sourceSets.test.resources.srcDir(keystoreDir) processTestResources.dependsOn(copyKeyCerts) -integTestCluster { +integTest.runner { + /* + * We have to disable setting the number of available processors as tests in the same JVM randomize processors and will step on each + * other if we allow them to set the number of available processors as it's set-once in Netty. + */ + systemProperty 'es.set.netty.runtime.available.processors', 'false' + + + // TODO: fix this rest test to not depend on a hardcoded port! 
+ def blacklist = ['getting_started/10_monitor_cluster_health/*'] + boolean snapshot = Boolean.valueOf(System.getProperty("build.snapshot", "true")) + if (!snapshot) { + // these tests attempt to install basic/internal licenses signed against the dev/public.key + // Since there is no infrastructure in place (anytime soon) to generate licenses using the production + // private key, these tests are whitelisted in non-snapshot test runs + blacklist.addAll(['xpack/15_basic/*', 'license/20_put_license/*']) + } + systemProperty 'tests.rest.blacklist', blacklist.join(',') dependsOn copyKeyCerts +} + +testClusters.integTest { + distribution = 'DEFAULT' // this is important since we use the reindex module in ML setting 'xpack.ml.enabled', 'true' setting 'xpack.security.enabled', 'true' // Integration tests are supposed to enable/disable exporters before/after each test @@ -140,50 +141,10 @@ integTestCluster { setting 'xpack.security.transport.ssl.verification_mode', 'certificate' setting 'xpack.security.audit.enabled', 'true' setting 'xpack.license.self_generated.type', 'trial' - keystoreSetting 'bootstrap.password', 'x-pack-test-password' - keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' - distribution = 'default' // this is important since we use the reindex module in ML - - setupCommand 'setupTestUser', 'bin/elasticsearch-users', 'useradd', 'x_pack_rest_user', '-p', 'x-pack-test-password', '-r', 'superuser' + keystore 'bootstrap.password', 'x-pack-test-password' + keystore 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' + user username: "x_pack_rest_user", password: "x-pack-test-password" extraConfigFile nodeKey.name, nodeKey extraConfigFile nodeCert.name, nodeCert - - waitCondition = { NodeInfo node, AntBuilder ant -> - File tmpFile = new File(node.cwd, 'wait.success') - - for (int i = 0; i < 10; i++) { - // we use custom wait logic here as the elastic user is not available immediately and ant.get will fail when a 401 is returned - HttpURLConnection httpURLConnection = null; - try { - httpURLConnection = (HttpURLConnection) new URL("http://${node.httpUri()}/_cluster/health?wait_for_nodes=${numNodes}&wait_for_status=yellow").openConnection(); - httpURLConnection.setRequestProperty("Authorization", "Basic " + - Base64.getEncoder().encodeToString("x_pack_rest_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8))); - httpURLConnection.setRequestMethod("GET"); - httpURLConnection.connect(); - if (httpURLConnection.getResponseCode() == 200) { - tmpFile.withWriter StandardCharsets.UTF_8.name(), { - it.write(httpURLConnection.getInputStream().getText(StandardCharsets.UTF_8.name())) - } - } - } catch (Exception e) { - if (i == 9) { - logger.error("final attempt of calling cluster health failed", e) - } else { - logger.debug("failed to call cluster health", e) - } - } finally { - if (httpURLConnection != null) { - httpURLConnection.disconnect(); - } - } - - // did not start, so wait a bit before trying again - Thread.sleep(500L); - } - return tmpFile.exists() - } -} -if (integTestCluster.distribution.startsWith("oss-")) { - integTest.enabled = false } diff --git a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle index 17de70794017e..a45d3e1b32607 100644 --- a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle +++ b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle @@ -23,8 +23,8 @@ testClusters."leader-cluster" { setting 
'xpack.license.self_generated.type', 'trial' } +File policyFile = file("${buildDir}/tmp/java.policy") task writeJavaPolicy { - ext.policyFile = file("${buildDir}/tmp/java.policy") doLast { if (policyFile.parentFile.exists() == false && policyFile.parentFile.mkdirs() == false) { throw new GradleException("failed to create temporary directory [${tmp}]") @@ -40,10 +40,10 @@ task writeJavaPolicy { } task "follow-cluster"(type: RestIntegTestTask) { - dependsOn writeJavaPolicy, "leader-cluster" + dependsOn 'writeJavaPolicy', "leader-cluster" useCluster testClusters."leader-cluster" runner { - systemProperty 'java.security.policy', "file://${writeJavaPolicy.policyFile}" + systemProperty 'java.security.policy', "file://${policyFile}" systemProperty 'tests.target_cluster', 'follow' nonInputProperties.systemProperty 'tests.leader_host', "${-> testClusters."leader-cluster".getAllHttpSocketURI().get(0)}" nonInputProperties.systemProperty 'log', "${-> testClusters."follow-cluster".getFirstNode().getServerLog()}" diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/TransportBulkShardOperationsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/TransportBulkShardOperationsAction.java index 45ffbf6998d90..5f8f1d5368a62 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/TransportBulkShardOperationsAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/TransportBulkShardOperationsAction.java @@ -144,10 +144,11 @@ public static CcrWritePrimaryResult shardOperationOnPrimary( assert failure.getSeqNo() == targetOp.seqNo() : targetOp.seqNo() + " != " + failure.getSeqNo(); if (failure.getExistingPrimaryTerm().isPresent()) { appliedOperations.add(rewriteOperationWithPrimaryTerm(sourceOp, failure.getExistingPrimaryTerm().getAsLong())); - } else if (targetOp.seqNo() > primary.getGlobalCheckpoint()) { - assert false : "can't find primary_term for existing op=" + targetOp + " gcp=" + primary.getGlobalCheckpoint(); + } else if (targetOp.seqNo() > primary.getLastKnownGlobalCheckpoint()) { + assert false : + "can't find primary_term for existing op=" + targetOp + " gcp=" + primary.getLastKnownGlobalCheckpoint(); throw new IllegalStateException("can't find primary_term for existing op=" + targetOp + - " global_checkpoint=" + primary.getGlobalCheckpoint(), failure); + " global_checkpoint=" + primary.getLastKnownGlobalCheckpoint(), failure); } } else { assert false : "Only already-processed error should happen; op=[" + targetOp + "] error=[" + result.getFailure() + "]"; diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngine.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngine.java index 619e0a04baf9a..8d4f0b219bd2c 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngine.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngine.java @@ -90,7 +90,7 @@ protected InternalEngine.IndexingStrategy indexingStrategyForOperation(final Ind } else { return IndexingStrategy.processButSkipLucene(false, index.version()); } - } else if (maxSeqNoOfUpdatesOrDeletes <= getLocalCheckpoint()) { + } else if (maxSeqNoOfUpdatesOrDeletes <= getProcessedLocalCheckpoint()) { assert maxSeqNoOfUpdatesOrDeletes < index.seqNo() : "seq_no[" + index.seqNo() + "] <= msu[" + maxSeqNoOfUpdatesOrDeletes + "]"; numOfOptimizedIndexing.inc(); return 
InternalEngine.IndexingStrategy.optimizedAppendOnly(index.version()); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java index 2fedacabc93f3..a6406df0fbe7a 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java @@ -491,10 +491,10 @@ protected void restoreFiles(List filesToRecover, Store store) throws I while (offset < fileLength && error.get() == null) { final long requestSeqId = requestSeqIdTracker.generateSeqNo(); try { - requestSeqIdTracker.waitForOpsToComplete(requestSeqId - ccrSettings.getMaxConcurrentFileChunks()); + requestSeqIdTracker.waitForProcessedOpsToComplete(requestSeqId - ccrSettings.getMaxConcurrentFileChunks()); if (error.get() != null) { - requestSeqIdTracker.markSeqNoAsCompleted(requestSeqId); + requestSeqIdTracker.markSeqNoAsProcessed(requestSeqId); break; } @@ -514,7 +514,7 @@ protected void restoreFiles(List filesToRecover, Store store) throws I @Override public void onFailure(Exception e) { error.compareAndSet(null, Tuple.tuple(fileInfo.metadata(), e)); - requestSeqIdTracker.markSeqNoAsCompleted(requestSeqId); + requestSeqIdTracker.markSeqNoAsProcessed(requestSeqId); } @Override @@ -526,24 +526,24 @@ protected void doRun() throws Exception { throttleListener.accept(nanosPaused); final boolean lastChunk = r.getOffset() + actualChunkSize >= fileLength; multiFileWriter.writeFileChunk(fileInfo.metadata(), r.getOffset(), r.getChunk(), lastChunk); - requestSeqIdTracker.markSeqNoAsCompleted(requestSeqId); + requestSeqIdTracker.markSeqNoAsProcessed(requestSeqId); } }), e -> { error.compareAndSet(null, Tuple.tuple(fileInfo.metadata(), e)); - requestSeqIdTracker.markSeqNoAsCompleted(requestSeqId); + requestSeqIdTracker.markSeqNoAsProcessed(requestSeqId); } ), timeout, ThreadPool.Names.GENERIC, GetCcrRestoreFileChunkAction.NAME); remoteClient.execute(GetCcrRestoreFileChunkAction.INSTANCE, request, listener); } catch (Exception e) { error.compareAndSet(null, Tuple.tuple(fileInfo.metadata(), e)); - requestSeqIdTracker.markSeqNoAsCompleted(requestSeqId); + requestSeqIdTracker.markSeqNoAsProcessed(requestSeqId); } } } try { - requestSeqIdTracker.waitForOpsToComplete(requestSeqIdTracker.getMaxSeqNo()); + requestSeqIdTracker.waitForProcessedOpsToComplete(requestSeqIdTracker.getMaxSeqNo()); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new ElasticsearchException(e); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java index f951c51844bdc..9642e0a641a2b 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java @@ -27,7 +27,6 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.test.InternalTestCluster; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.xpack.CcrIntegTestCase; import org.elasticsearch.xpack.core.ccr.action.FollowStatsAction; import org.elasticsearch.xpack.core.ccr.action.PutFollowAction; @@ -44,8 +43,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import 
static org.hamcrest.Matchers.equalTo; -@TestLogging("org.elasticsearch.xpack.ccr:TRACE,org.elasticsearch.xpack.ccr.action.ShardChangesAction:DEBUG," - + "org.elasticsearch.index.shard:TRACE") public class FollowerFailOverIT extends CcrIntegTestCase { @Override @@ -286,7 +283,7 @@ public void testReadRequestsReturnLatestMappingVersion() throws Exception { IndexResponse indexResp = leaderCluster.client().prepareIndex("leader-index", "doc", "1") .setSource("{\"balance\": 100}", XContentType.JSON).setTimeout(TimeValue.ZERO).get(); assertThat(indexResp.getResult(), equalTo(DocWriteResponse.Result.CREATED)); - assertThat(indexShard.getGlobalCheckpoint(), equalTo(0L)); + assertThat(indexShard.getLastKnownGlobalCheckpoint(), equalTo(0L)); // Make sure at least one read-request which requires mapping sync is completed. assertBusy(() -> { FollowStatsAction.StatsResponses responses = diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesActionTests.java index e8b21f05c5c2d..9f6850fe20fc7 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesActionTests.java @@ -67,7 +67,7 @@ public void testGetOperations() throws Exception { int size = max - min + 1; final Translog.Operation[] operations = ShardChangesAction.getOperations( indexShard, - indexShard.getGlobalCheckpoint(), + indexShard.getLastKnownGlobalCheckpoint(), min, size, indexShard.getHistoryUUID(), @@ -83,7 +83,7 @@ public void testGetOperations() throws Exception { IllegalStateException.class, () -> ShardChangesAction.getOperations( indexShard, - indexShard.getGlobalCheckpoint(), + indexShard.getLastKnownGlobalCheckpoint(), numWrites, numWrites + 1, indexShard.getHistoryUUID(), @@ -92,18 +92,19 @@ public void testGetOperations() throws Exception { Locale.ROOT, "not exposing operations from [%d] greater than the global checkpoint [%d]", numWrites, - indexShard.getGlobalCheckpoint()); + indexShard.getLastKnownGlobalCheckpoint()); assertThat(e, hasToString(containsString(message))); } // get operations for a range some operations do not exist: - Translog.Operation[] operations = ShardChangesAction.getOperations(indexShard, indexShard.getGlobalCheckpoint(), + Translog.Operation[] operations = ShardChangesAction.getOperations(indexShard, indexShard.getLastKnownGlobalCheckpoint(), numWrites - 10, numWrites + 10, indexShard.getHistoryUUID(), new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES)); assertThat(operations.length, equalTo(10)); // Unexpected history UUID: Exception e = expectThrows(IllegalStateException.class, () -> ShardChangesAction.getOperations(indexShard, - indexShard.getGlobalCheckpoint(), 0, 10, "different-history-uuid", new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES))); + indexShard.getLastKnownGlobalCheckpoint(), 0, 10, "different-history-uuid", + new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES))); assertThat(e.getMessage(), equalTo("unexpected history uuid, expected [different-history-uuid], actual [" + indexShard.getHistoryUUID() + "]")); @@ -112,7 +113,7 @@ public void testGetOperations() throws Exception { final long fromSeqNo = randomLongBetween(Long.MIN_VALUE, -1); final int batchSize = randomIntBetween(0, Integer.MAX_VALUE); final IllegalArgumentException invalidRangeError = expectThrows(IllegalArgumentException.class, - () -> 
ShardChangesAction.getOperations(indexShard, indexShard.getGlobalCheckpoint(), + () -> ShardChangesAction.getOperations(indexShard, indexShard.getLastKnownGlobalCheckpoint(), fromSeqNo, batchSize, indexShard.getHistoryUUID(), new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES))); assertThat(invalidRangeError.getMessage(), equalTo("Invalid range; from_seqno [" + fromSeqNo + "], to_seqno [" + (fromSeqNo + batchSize - 1) + "]")); @@ -125,7 +126,8 @@ public void testGetOperationsWhenShardNotStarted() throws Exception { ShardRouting shardRouting = TestShardRouting.newShardRouting("index", 0, "_node_id", true, ShardRoutingState.INITIALIZING); Mockito.when(indexShard.routingEntry()).thenReturn(shardRouting); expectThrows(IndexShardNotStartedException.class, () -> ShardChangesAction.getOperations(indexShard, - indexShard.getGlobalCheckpoint(), 0, 1, indexShard.getHistoryUUID(), new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES))); + indexShard.getLastKnownGlobalCheckpoint(), 0, 1, indexShard.getHistoryUUID(), + new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES))); } public void testGetOperationsExceedByteLimit() throws Exception { @@ -142,7 +144,7 @@ public void testGetOperationsExceedByteLimit() throws Exception { } final IndexShard indexShard = indexService.getShard(0); - final Translog.Operation[] operations = ShardChangesAction.getOperations(indexShard, indexShard.getGlobalCheckpoint(), + final Translog.Operation[] operations = ShardChangesAction.getOperations(indexShard, indexShard.getLastKnownGlobalCheckpoint(), 0, 12, indexShard.getHistoryUUID(), new ByteSizeValue(256, ByteSizeUnit.BYTES)); assertThat(operations.length, equalTo(12)); assertThat(operations[0].seqNo(), equalTo(0L)); @@ -172,7 +174,7 @@ public void testGetOperationsAlwaysReturnAtLeastOneOp() throws Exception { final IndexShard indexShard = indexService.getShard(0); final Translog.Operation[] operations = ShardChangesAction.getOperations( - indexShard, indexShard.getGlobalCheckpoint(), 0, 1, indexShard.getHistoryUUID(), ByteSizeValue.ZERO); + indexShard, indexShard.getLastKnownGlobalCheckpoint(), 0, 1, indexShard.getHistoryUUID(), ByteSizeValue.ZERO); assertThat(operations.length, equalTo(1)); assertThat(operations[0].seqNo(), equalTo(0L)); } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java index d2b424dc66fa9..21d5d3547b57c 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java @@ -139,13 +139,13 @@ protected void innerSendBulkShardOperationsRequest( Consumer handler, Consumer errorHandler) { for(Translog.Operation op : operations) { - tracker.markSeqNoAsCompleted(op.seqNo()); + tracker.markSeqNoAsProcessed(op.seqNo()); } receivedOperations.addAll(operations); // Emulate network thread and avoid SO: final BulkShardOperationsResponse response = new BulkShardOperationsResponse(); - response.setGlobalCheckpoint(tracker.getCheckpoint()); + response.setGlobalCheckpoint(tracker.getProcessedCheckpoint()); response.setMaxSeqNo(tracker.getMaxSeqNo()); threadPool.generic().execute(() -> handler.accept(response)); } @@ -180,7 +180,7 @@ protected void innerSendShardChangesRequest(long from, int maxOperationCount, Co } } else { assert from >= testRun.finalExpectedGlobalCheckpoint; - final long 
globalCheckpoint = tracker.getProcessedCheckpoint(); + final long maxSeqNo = tracker.getMaxSeqNo(); handler.accept(new ShardChangesAction.Response( 0L, diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java index 9da7e1522d2a3..f88b6542392c8 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java @@ -105,7 +105,8 @@ public void testSimpleCcrReplication() throws Exception { leaderGroup.assertAllEqual(docCount); Set<String> indexedDocIds = getShardDocUIDs(leaderGroup.getPrimary()); assertBusy(() -> { - assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), equalTo(leaderGroup.getPrimary().getGlobalCheckpoint())); + assertThat(followerGroup.getPrimary().getLastKnownGlobalCheckpoint(), + equalTo(leaderGroup.getPrimary().getLastKnownGlobalCheckpoint())); followerGroup.assertAllEqual(indexedDocIds.size()); }); for (IndexShard shard : followerGroup) { @@ -119,7 +120,8 @@ } leaderGroup.syncGlobalCheckpoint(); assertBusy(() -> { - assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), equalTo(leaderGroup.getPrimary().getGlobalCheckpoint())); + assertThat(followerGroup.getPrimary().getLastKnownGlobalCheckpoint(), + equalTo(leaderGroup.getPrimary().getLastKnownGlobalCheckpoint())); followerGroup.assertAllEqual(indexedDocIds.size() - deleteDocIds.size()); }); shardFollowTask.markAsCompleted(); @@ -192,7 +194,8 @@ public void testChangeLeaderHistoryUUID() throws Exception { leaderGroup.assertAllEqual(docCount); Set<String> indexedDocIds = getShardDocUIDs(leaderGroup.getPrimary()); assertBusy(() -> { - assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), equalTo(leaderGroup.getPrimary().getGlobalCheckpoint())); + assertThat(followerGroup.getPrimary().getLastKnownGlobalCheckpoint(), + equalTo(leaderGroup.getPrimary().getLastKnownGlobalCheckpoint())); followerGroup.assertAllEqual(indexedDocIds.size()); }); @@ -235,7 +238,8 @@ public void testChangeFollowerHistoryUUID() throws Exception { leaderGroup.assertAllEqual(docCount); Set<String> indexedDocIds = getShardDocUIDs(leaderGroup.getPrimary()); assertBusy(() -> { - assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), equalTo(leaderGroup.getPrimary().getGlobalCheckpoint())); + assertThat(followerGroup.getPrimary().getLastKnownGlobalCheckpoint(), + equalTo(leaderGroup.getPrimary().getLastKnownGlobalCheckpoint())); followerGroup.assertAllEqual(indexedDocIds.size()); }); @@ -282,11 +286,12 @@ public void testRetryBulkShardOperations() throws Exception { // Simulates some bulk requests are completed on the primary and replicated to some (but not all) replicas of the follower // but the primary of the follower crashed before these requests completed.
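The renames running through these tests (markSeqNoAsCompleted to markSeqNoAsProcessed, getCheckpoint to getProcessedCheckpoint, and IndexShard#getGlobalCheckpoint to getLastKnownGlobalCheckpoint) all track the same split: a sequence number can be processed, that is applied in memory, before it is durably persisted, and each side now has its own checkpoint. A minimal sketch of the processed side, assuming LocalCheckpointTracker's (maxSeqNo, localCheckpoint) constructor order:

public void testProcessedCheckpointAdvances() throws InterruptedException {
    LocalCheckpointTracker tracker = new LocalCheckpointTracker(
            SequenceNumbers.NO_OPS_PERFORMED, SequenceNumbers.NO_OPS_PERFORMED);
    long seqNo = tracker.generateSeqNo();         // assigns 0
    tracker.markSeqNoAsProcessed(seqNo);          // applied in memory; durability is tracked separately
    assert tracker.getProcessedCheckpoint() == seqNo;
    tracker.waitForProcessedOpsToComplete(seqNo); // returns at once: everything up to seqNo is processed
}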
for (int numBulks = between(1, 5), i = 0; i < numBulks; i++) { - long fromSeqNo = randomLongBetween(0, leadingPrimary.getGlobalCheckpoint()); - long toSeqNo = randomLongBetween(fromSeqNo, leadingPrimary.getGlobalCheckpoint()); + long fromSeqNo = randomLongBetween(0, leadingPrimary.getLastKnownGlobalCheckpoint()); + long toSeqNo = randomLongBetween(fromSeqNo, leadingPrimary.getLastKnownGlobalCheckpoint()); int numOps = Math.toIntExact(toSeqNo + 1 - fromSeqNo); - Translog.Operation[] ops = ShardChangesAction.getOperations(leadingPrimary, leadingPrimary.getGlobalCheckpoint(), - fromSeqNo, numOps, leadingPrimary.getHistoryUUID(), new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES)); + Translog.Operation[] ops = ShardChangesAction.getOperations(leadingPrimary, + leadingPrimary.getLastKnownGlobalCheckpoint(), fromSeqNo, numOps, leadingPrimary.getHistoryUUID(), + new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES)); IndexShard followingPrimary = followerGroup.getPrimary(); TransportWriteAction.WritePrimaryResult primaryResult = @@ -296,7 +301,7 @@ public void testRetryBulkShardOperations() throws Exception { for (IndexShard replica : randomSubsetOf(followerGroup.getReplicas())) { final PlainActionFuture permitFuture = new PlainActionFuture<>(); replica.acquireReplicaOperationPermit(followingPrimary.getOperationPrimaryTerm(), - followingPrimary.getGlobalCheckpoint(), followingPrimary.getMaxSeqNoOfUpdatesOrDeletes(), + followingPrimary.getLastKnownGlobalCheckpoint(), followingPrimary.getMaxSeqNoOfUpdatesOrDeletes(), permitFuture, ThreadPool.Names.SAME, primaryResult); try (Releasable ignored = permitFuture.get()) { TransportBulkShardOperationsAction.shardOperationOnReplica(primaryResult.replicaRequest(), replica, logger); @@ -308,13 +313,14 @@ public void testRetryBulkShardOperations() throws Exception { ShardFollowNodeTask shardFollowTask = createShardFollowTask(leaderGroup, followerGroup); SeqNoStats followerSeqNoStats = followerGroup.getPrimary().seqNoStats(); shardFollowTask.start(followerGroup.getPrimary().getHistoryUUID(), - leadingPrimary.getGlobalCheckpoint(), + leadingPrimary.getLastKnownGlobalCheckpoint(), leadingPrimary.getMaxSeqNoOfUpdatesOrDeletes(), followerSeqNoStats.getGlobalCheckpoint(), followerSeqNoStats.getMaxSeqNo()); try { assertBusy(() -> { - assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), equalTo(leadingPrimary.getGlobalCheckpoint())); + assertThat(followerGroup.getPrimary().getLastKnownGlobalCheckpoint(), + equalTo(leadingPrimary.getLastKnownGlobalCheckpoint())); assertConsistentHistoryBetweenLeaderAndFollower(leaderGroup, followerGroup, true); }); } finally { @@ -380,9 +386,9 @@ public void testSimpleRemoteRecovery() throws Exception { ShardFollowNodeTask followTask = createShardFollowTask(leader, follower); followTask.start( follower.getPrimary().getHistoryUUID(), - leader.getPrimary().getGlobalCheckpoint(), + leader.getPrimary().getLastKnownGlobalCheckpoint(), leader.getPrimary().seqNoStats().getMaxSeqNo(), - follower.getPrimary().getGlobalCheckpoint(), + follower.getPrimary().getLastKnownGlobalCheckpoint(), follower.getPrimary().seqNoStats().getMaxSeqNo() ); leader.appendDocs(between(0, 100)); @@ -403,9 +409,9 @@ public void testRetentionLeaseManagement() throws Exception { final ShardFollowNodeTask task = createShardFollowTask(leader, follower); task.start( follower.getPrimary().getHistoryUUID(), - leader.getPrimary().getGlobalCheckpoint(), + leader.getPrimary().getLastKnownGlobalCheckpoint(), leader.getPrimary().seqNoStats().getMaxSeqNo(), - 
follower.getPrimary().getGlobalCheckpoint(), + follower.getPrimary().getLastKnownGlobalCheckpoint(), follower.getPrimary().seqNoStats().getMaxSeqNo()); final Scheduler.Cancellable renewable = task.getRenewable(); assertNotNull(renewable); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsTests.java index 856b6da2f9d7e..43302a5177ed7 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsTests.java @@ -142,7 +142,7 @@ public void testPrimaryResultIncludeOnlyAppliedOperations() throws Exception { newPrimary.getHistoryUUID(), Stream.concat(secondBulk.stream(), existingOps.stream()).collect(Collectors.toList()), seqno, newPrimary, logger); final long newPrimaryTerm = newPrimary.getOperationPrimaryTerm(); - final long globalCheckpoint = newPrimary.getGlobalCheckpoint(); + final long globalCheckpoint = newPrimary.getLastKnownGlobalCheckpoint(); final List appliedOperations = Stream.concat( secondBulk.stream().map(op -> rewriteOperationWithPrimaryTerm(op, newPrimaryTerm)), existingOps.stream().filter(op -> op.seqNo() > globalCheckpoint).map(op -> rewriteOperationWithPrimaryTerm(op, oldPrimaryTerm)) diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java index 4a56d6370eb91..98bfa1b2068bb 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java @@ -337,7 +337,7 @@ public void testBasicOptimization() throws Exception { for (int i = 0; i < numDocs; i++) { leader.index(indexForPrimary(Integer.toString(i))); } - EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getProcessedLocalCheckpoint()); assertThat(follower.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(-1L)); assertThat(follower.getNumberOfOptimizedIndexing(), equalTo(numDocs)); assertThat(getDocIds(follower, true), equalTo(getDocIds(leader, true))); @@ -350,7 +350,7 @@ public void testBasicOptimization() throws Exception { leader.delete(deleteForPrimary(Integer.toString(i))); } } - EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getProcessedLocalCheckpoint()); assertThat(follower.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(leader.getMaxSeqNoOfUpdatesOrDeletes())); assertThat(follower.getNumberOfOptimizedIndexing(), equalTo(numDocs)); assertThat(getDocIds(follower, true), equalTo(getDocIds(leader, true))); @@ -362,7 +362,7 @@ public void testBasicOptimization() throws Exception { docIds.add(docId); leader.index(indexForPrimary(docId)); } - EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getProcessedLocalCheckpoint()); assertThat(follower.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(leader.getMaxSeqNoOfUpdatesOrDeletes())); assertThat(follower.getNumberOfOptimizedIndexing(), equalTo(numDocs + moreDocs)); assertThat(getDocIds(follower, true), equalTo(getDocIds(leader, true))); @@ -378,7 +378,7 @@ 
public void testOptimizeAppendOnly() throws Exception { runFollowTest((leader, follower) -> { EngineTestCase.concurrentlyApplyOps(ops, leader); assertThat(follower.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(-1L)); - EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getProcessedLocalCheckpoint()); assertThat(follower.getNumberOfOptimizedIndexing(), equalTo((long) numOps)); }); } @@ -396,13 +396,13 @@ public void testOptimizeMultipleVersions() throws Exception { Randomness.shuffle(ops); runFollowTest((leader, follower) -> { EngineTestCase.concurrentlyApplyOps(ops, leader); - EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getProcessedLocalCheckpoint()); final List appendOps = new ArrayList<>(); for (int numAppends = scaledRandomIntBetween(0, 100), i = 0; i < numAppends; i++) { appendOps.add(indexForPrimary("append-" + i)); } EngineTestCase.concurrentlyApplyOps(appendOps, leader); - EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getProcessedLocalCheckpoint()); assertThat(follower.getNumberOfOptimizedIndexing(), greaterThanOrEqualTo((long) appendOps.size())); }); } @@ -410,19 +410,19 @@ public void testOptimizeMultipleVersions() throws Exception { public void testOptimizeSingleDocSequentially() throws Exception { runFollowTest((leader, follower) -> { leader.index(indexForPrimary("id")); - EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getProcessedLocalCheckpoint()); assertThat(follower.getNumberOfOptimizedIndexing(), equalTo(1L)); leader.delete(deleteForPrimary("id")); - EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getProcessedLocalCheckpoint()); assertThat(follower.getNumberOfOptimizedIndexing(), equalTo(1L)); leader.index(indexForPrimary("id")); - EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getProcessedLocalCheckpoint()); assertThat(follower.getNumberOfOptimizedIndexing(), equalTo(2L)); leader.index(indexForPrimary("id")); - EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getProcessedLocalCheckpoint()); assertThat(follower.getNumberOfOptimizedIndexing(), equalTo(2L)); }); } @@ -432,20 +432,20 @@ public void testOptimizeSingleDocConcurrently() throws Exception { Randomness.shuffle(ops); runFollowTest((leader, follower) -> { EngineTestCase.concurrentlyApplyOps(ops, leader); - EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getProcessedLocalCheckpoint()); assertThat(getDocIds(follower, true), equalTo(getDocIds(leader, true))); long numOptimized = follower.getNumberOfOptimizedIndexing(); leader.delete(deleteForPrimary("id")); - EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getProcessedLocalCheckpoint()); assertThat(follower.getNumberOfOptimizedIndexing(), equalTo(numOptimized)); leader.index(indexForPrimary("id")); - EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, 
leader.getProcessedLocalCheckpoint()); assertThat(follower.getNumberOfOptimizedIndexing(), equalTo(numOptimized + 1L)); leader.index(indexForPrimary("id")); - EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getProcessedLocalCheckpoint()); assertThat(follower.getNumberOfOptimizedIndexing(), equalTo(numOptimized + 1L)); }); } @@ -454,7 +454,7 @@ private void runFollowTest(CheckedBiConsumer wrappedTask = (leader, follower) -> { Thread[] threads = new Thread[between(1, 8)]; AtomicBoolean taskIsCompleted = new AtomicBoolean(); - AtomicLong lastFetchedSeqNo = new AtomicLong(follower.getLocalCheckpoint()); + AtomicLong lastFetchedSeqNo = new AtomicLong(follower.getProcessedLocalCheckpoint()); CountDownLatch latch = new CountDownLatch(threads.length + 1); for (int i = 0; i < threads.length; i++) { threads[i] = new Thread(() -> { @@ -472,7 +472,7 @@ private void runFollowTest(CheckedBiConsumer docs = new ArrayList<>(); Set foundResourceIds = new HashSet<>(); + long totalHitCount = response.getHits().getTotalHits().value; for (SearchHit hit : response.getHits().getHits()) { BytesReference docSource = hit.getSourceRef(); try (InputStream stream = docSource.streamInput(); @@ -115,7 +116,7 @@ public void onResponse(SearchResponse response) { if (requiredMatches.hasUnmatchedIds()) { listener.onFailure(notFoundException(requiredMatches.unmatchedIdsString())); } else { - listener.onResponse(new QueryPage<>(docs, docs.size(), getResultsField())); + listener.onResponse(new QueryPage<>(docs, totalHitCount, getResultsField())); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsAction.java index 7cee5dbcbf527..d8bcf730012c6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsAction.java @@ -91,8 +91,8 @@ public static class Response extends AbstractGetResourcesResponse transformConfigs) { - super(new QueryPage<>(transformConfigs, transformConfigs.size(), DataFrameField.TRANSFORMS)); + public Response(List transformConfigs, long count) { + super(new QueryPage<>(transformConfigs, count, DataFrameField.TRANSFORMS)); } public Response() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsStatsAction.java index 78ce0ee58c961..a3d195967e9c7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsStatsAction.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.dataframe.action; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.TaskOperationFailure; @@ -21,8 +22,10 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.tasks.Task; import org.elasticsearch.xpack.core.action.util.PageParams; +import 
org.elasticsearch.xpack.core.action.util.QueryPage; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateAndStats; +import org.elasticsearch.xpack.core.dataframe.utils.ExceptionsHelper; import java.io.IOException; import java.util.Collections; @@ -137,32 +140,52 @@ public boolean equals(Object obj) { } public static class Response extends BaseTasksResponse implements ToXContentObject { - private List transformsStateAndStats; + private final QueryPage transformsStateAndStats; - public Response(List transformsStateAndStats) { - super(Collections.emptyList(), Collections.emptyList()); - this.transformsStateAndStats = transformsStateAndStats; + public Response(List transformStateAndStats, long count) { + this(new QueryPage<>(transformStateAndStats, count, DataFrameField.TRANSFORMS)); } - public Response(List transformsStateAndStats, List taskFailures, - List nodeFailures) { + public Response(List transformStateAndStats, + long count, + List taskFailures, + List nodeFailures) { + this(new QueryPage<>(transformStateAndStats, count, DataFrameField.TRANSFORMS), taskFailures, nodeFailures); + } + + private Response(QueryPage transformsStateAndStats) { + this(transformsStateAndStats, Collections.emptyList(), Collections.emptyList()); + } + + private Response(QueryPage transformsStateAndStats, + List taskFailures, + List nodeFailures) { super(taskFailures, nodeFailures); - this.transformsStateAndStats = transformsStateAndStats; + this.transformsStateAndStats = ExceptionsHelper.requireNonNull(transformsStateAndStats, "transformsStateAndStats"); } public Response(StreamInput in) throws IOException { super(in); - transformsStateAndStats = in.readList(DataFrameTransformStateAndStats::new); + if (in.getVersion().onOrAfter(Version.V_7_3_0)) { + transformsStateAndStats = new QueryPage<>(in, DataFrameTransformStateAndStats::new); + } else { + List stats = in.readList(DataFrameTransformStateAndStats::new); + transformsStateAndStats = new QueryPage<>(stats, stats.size(), DataFrameField.TRANSFORMS); + } } public List getTransformsStateAndStats() { - return transformsStateAndStats; + return transformsStateAndStats.results(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeList(transformsStateAndStats); + if (out.getVersion().onOrAfter(Version.V_7_3_0)) { + transformsStateAndStats.writeTo(out); + } else { + out.writeList(transformsStateAndStats.results()); + } } @Override @@ -174,8 +197,7 @@ public void readFrom(StreamInput in) { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); toXContentCommon(builder, params); - builder.field(DataFrameField.COUNT.getPreferredName(), transformsStateAndStats.size()); - builder.field(DataFrameField.TRANSFORMS.getPreferredName(), transformsStateAndStats); + transformsStateAndStats.doXContentBody(builder, params); builder.endObject(); return builder; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformAction.java index 90e0adba16269..7a2e05798908c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformAction.java 
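Wrapping the stats in a QueryPage (gated on V_7_3_0 for the wire format) means the response can now report the total number of matching transforms separately from the page it actually carries, instead of conflating the two as the old bare list did. A hedged sketch of that distinction; the fetchPage() helper and the counts are hypothetical:

List<DataFrameTransformStateAndStats> page = fetchPage(0, 20);  // hypothetical: first 20 matches
long totalMatches = 135L;                                       // hypothetical: the search's total hit count
QueryPage<DataFrameTransformStateAndStats> stats =
        new QueryPage<>(page, totalMatches, DataFrameField.TRANSFORMS);
assert stats.results().size() == 20;  // the page carried on the wire
assert stats.count() == 135L;         // the overall count, serialized alongside it on 7.3.0+

The totalHitCount change earlier in this patch follows the same reasoning: a page of docs.size() results may be only a slice of everything that matched.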
@@ -24,10 +24,11 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; +import org.elasticsearch.xpack.core.dataframe.transforms.DestConfig; import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; @@ -66,8 +67,20 @@ public Request(StreamInput in) throws IOException { public static Request fromXContent(final XContentParser parser) throws IOException { Map content = parser.map(); - // Destination and ID are not required for Preview, so we just supply our own - content.put(DataFrameField.DESTINATION.getPreferredName(), Collections.singletonMap("index", "unused-transform-preview-index")); + // dest.index and ID are not required for Preview, so we just supply our own + Map tempDestination = new HashMap<>(); + tempDestination.put(DestConfig.INDEX.getPreferredName(), "unused-transform-preview-index"); + // Users can still provide just dest.pipeline to preview what their data would look like given the pipeline ID + Object providedDestination = content.get(DataFrameField.DESTINATION.getPreferredName()); + if (providedDestination instanceof Map) { + @SuppressWarnings("unchecked") + Map destMap = (Map)providedDestination; + String pipeline = destMap.get(DestConfig.PIPELINE.getPreferredName()); + if (pipeline != null) { + tempDestination.put(DestConfig.PIPELINE.getPreferredName(), pipeline); + } + } + content.put(DataFrameField.DESTINATION.getPreferredName(), tempDestination); content.put(DataFrameField.ID.getPreferredName(), "transform-preview"); try(XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().map(content); XContentParser newParser = XContentType.JSON diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DestConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DestConfig.java index 29b2b8c5dc0e1..282a3f9a04484 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DestConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DestConfig.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core.dataframe.transforms; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -20,10 +21,12 @@ import java.util.Objects; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; public class DestConfig implements Writeable, ToXContentObject { public static final ParseField INDEX = new ParseField("index"); + public static final ParseField PIPELINE = new ParseField("pipeline"); public static final ConstructingObjectParser STRICT_PARSER = createParser(false); public static final ConstructingObjectParser LENIENT_PARSER = createParser(true); @@ -31,25 +34,37 @@ public class DestConfig implements Writeable, ToXContentObject { private static ConstructingObjectParser createParser(boolean lenient) { ConstructingObjectParser parser = new ConstructingObjectParser<>("data_frame_config_dest", lenient, - args -> new DestConfig((String)args[0])); + args -> new DestConfig((String)args[0], (String) 
args[1])); parser.declareString(constructorArg(), INDEX); + parser.declareString(optionalConstructorArg(), PIPELINE); return parser; } private final String index; + private final String pipeline; - public DestConfig(String index) { + public DestConfig(String index, String pipeline) { this.index = ExceptionsHelper.requireNonNull(index, INDEX.getPreferredName()); + this.pipeline = pipeline; } public DestConfig(final StreamInput in) throws IOException { index = in.readString(); + if (in.getVersion().onOrAfter(Version.CURRENT)) { + pipeline = in.readOptionalString(); + } else { + pipeline = null; + } } public String getIndex() { return index; } + public String getPipeline() { + return pipeline; + } + public boolean isValid() { return index.isEmpty() == false; } @@ -57,12 +72,18 @@ public boolean isValid() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(index); + if (out.getVersion().onOrAfter(Version.CURRENT)) { + out.writeOptionalString(pipeline); + } } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(INDEX.getPreferredName(), index); + if (pipeline != null) { + builder.field(PIPELINE.getPreferredName(), pipeline); + } builder.endObject(); return builder; } @@ -77,12 +98,13 @@ public boolean equals(Object other) { } DestConfig that = (DestConfig) other; - return Objects.equals(index, that.index); + return Objects.equals(index, that.index) && + Objects.equals(pipeline, that.pipeline); } @Override public int hashCode(){ - return Objects.hash(index); + return Objects.hash(index, pipeline); } public static DestConfig fromXContent(final XContentParser parser, boolean lenient) throws IOException { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenEngineTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenEngineTests.java index 4cf21cfbecd19..b03631550befe 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenEngineTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenEngineTests.java @@ -188,7 +188,7 @@ private int addDocuments(AtomicLong globalCheckpoint, InternalEngine engine) thr if (rarely()) { engine.flush(); } - globalCheckpoint.set(engine.getLocalCheckpoint()); + globalCheckpoint.set(engine.getProcessedLocalCheckpoint()); } engine.syncTranslog(); return numDocsAdded; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java index 0f9afa3d44be6..f4f9c2ecbf9e6 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java @@ -363,7 +363,7 @@ public void testFreezeEmptyIndexWithTranslogOps() throws Exception { final Index index = client().admin().cluster().prepareState().get().getState().metaData().index(indexName).getIndex(); final IndexService indexService = indicesService.indexService(index); assertThat(indexService.hasShard(0), is(true)); - assertThat(indexService.getShard(0).getGlobalCheckpoint(), greaterThanOrEqualTo(nbNoOps - 1L)); + assertThat(indexService.getShard(0).getLastKnownGlobalCheckpoint(), greaterThanOrEqualTo(nbNoOps - 1L)); }); assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest(indexName)).actionGet()); diff --git 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsActionResponseTests.java index d6e32bd46c439..4a63ff787ffda 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsActionResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsActionResponseTests.java @@ -32,7 +32,7 @@ public void testInvalidTransforms() throws IOException { transforms.add(DataFrameTransformConfigTests.randomDataFrameTransformConfig()); transforms.add(DataFrameTransformConfigTests.randomInvalidDataFrameTransformConfig()); - Response r = new Response(transforms); + Response r = new Response(transforms, transforms.size()); XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); r.toXContent(builder, ToXContent.EMPTY_PARAMS); Map<String, Object> responseAsMap = createParser(builder).map(); @@ -52,7 +52,7 @@ public void testNoHeaderInResponse() throws IOException { transforms.add(DataFrameTransformConfigTests.randomDataFrameTransformConfig()); } - Response r = new Response(transforms); + Response r = new Response(transforms, transforms.size()); XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); r.toXContent(builder, ToXContent.EMPTY_PARAMS); Map<String, Object> responseAsMap = createParser(builder).map(); @@ -76,7 +76,7 @@ protected Response createTestInstance() { configs.add(DataFrameTransformConfigTests.randomDataFrameTransformConfig()); } - return new Response(configs); + return new Response(configs, randomNonNegativeLong()); } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsStatsActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsStatsActionResponseTests.java index bbe53b59a8ace..aa5c1a03ef2ea 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsStatsActionResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsStatsActionResponseTests.java @@ -32,7 +32,7 @@ protected Response createTestInstance() { taskFailures.add(new TaskOperationFailure("node1", randomLongBetween(1, 10), new Exception("error"))); nodeFailures.add(new FailedNodeException("node1", "message", new Exception("error"))); - return new Response(stats, taskFailures, nodeFailures); + return new Response(stats, randomLongBetween(stats.size(), 10_000_000L), taskFailures, nodeFailures); } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformActionRequestTests.java index 0cfc659e50646..c3a921a90d26b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformActionRequestTests.java @@ -40,7 +40,7 @@ protected boolean supportsUnknownFields() { @Override protected Request createTestInstance() { DataFrameTransformConfig config = new
DataFrameTransformConfig("transform-preview", randomSourceConfig(), - new DestConfig("unused-transform-preview-index"), + new DestConfig("unused-transform-preview-index", null), null, PivotConfigTests.randomPivotConfig(), null); return new Request(config); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DestConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DestConfigTests.java index b29fa46c34ede..094267ba4adea 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DestConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DestConfigTests.java @@ -17,7 +17,8 @@ public class DestConfigTests extends AbstractSerializingDataFrameTestCase indexStats = getAsMap(dataFrameIndex + "/_stats"); + assertEquals(27, XContentMapValues.extractValue("_all.total.docs.count", indexStats)); + + // get and check some users + assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_0", 3.776978417); + assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_5", 3.72); + assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_11", 3.846153846); + assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_20", 3.769230769); + assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_26", 3.918918918); + + Map searchResult = getAsMap(dataFrameIndex + "/_search?q=reviewer:user_0"); + Integer actual = (Integer) ((List) XContentMapValues.extractValue("hits.hits._source.pipeline_field", searchResult)).get(0); + assertThat(actual, equalTo(pipelineValue)); + } + public void testHistogramPivot() throws Exception { String transformId = "simple_histogram_pivot"; String dataFrameIndex = "pivot_reviews_via_histogram"; @@ -138,38 +178,38 @@ public void testBiggerPivot() throws Exception { + " \"dest\": {\"index\":\"" + dataFrameIndex + "\"},"; config += " \"pivot\": {" - + " \"group_by\": {" - + " \"reviewer\": {" - + " \"terms\": {" - + " \"field\": \"user_id\"" - + " } } }," - + " \"aggregations\": {" - + " \"avg_rating\": {" - + " \"avg\": {" - + " \"field\": \"stars\"" - + " } }," - + " \"sum_rating\": {" - + " \"sum\": {" - + " \"field\": \"stars\"" - + " } }," - + " \"cardinality_business\": {" - + " \"cardinality\": {" - + " \"field\": \"business_id\"" - + " } }," - + " \"min_rating\": {" - + " \"min\": {" - + " \"field\": \"stars\"" - + " } }," - + " \"max_rating\": {" - + " \"max\": {" - + " \"field\": \"stars\"" - + " } }," - + " \"count\": {" - + " \"value_count\": {" - + " \"field\": \"business_id\"" - + " } }" - + " } }" - + "}"; + + " \"group_by\": {" + + " \"reviewer\": {" + + " \"terms\": {" + + " \"field\": \"user_id\"" + + " } } }," + + " \"aggregations\": {" + + " \"avg_rating\": {" + + " \"avg\": {" + + " \"field\": \"stars\"" + + " } }," + + " \"sum_rating\": {" + + " \"sum\": {" + + " \"field\": \"stars\"" + + " } }," + + " \"cardinality_business\": {" + + " \"cardinality\": {" + + " \"field\": \"business_id\"" + + " } }," + + " \"min_rating\": {" + + " \"min\": {" + + " \"field\": \"stars\"" + + " } }," + + " \"max_rating\": {" + + " \"max\": {" + + " \"field\": \"stars\"" + + " } }," + + " \"count\": {" + + " \"value_count\": {" + + " \"field\": \"business_id\"" + + " } }" + + " } }" + + "}"; createDataframeTransformRequest.setJsonEntity(config); Map createDataframeTransformResponse = entityAsMap(client().performRequest(createDataframeTransformRequest)); @@ -260,7 +300,7 @@ public 
void testPreviewTransform() throws Exception { createPreviewRequest.setJsonEntity(config); Map<String, Object> previewDataframeResponse = entityAsMap(client().performRequest(createPreviewRequest)); - List<Map<String, Object>> preview = (List<Map<String, Object>>)previewDataframeResponse.get("preview"); + List<Map<String, Object>> preview = (List<Map<String, Object>>) previewDataframeResponse.get("preview"); // preview is limited to 100 assertThat(preview.size(), equalTo(100)); Set<String> expectedTopLevelFields = new HashSet<>(Arrays.asList("user", "by_day")); @@ -268,6 +308,57 @@ public void testPreviewTransform() throws Exception { preview.forEach(p -> { Set<String> keys = p.keySet(); assertThat(keys, equalTo(expectedTopLevelFields)); + Map<String, Object> nestedObj = (Map<String, Object>) p.get("user"); + keys = nestedObj.keySet(); + assertThat(keys, equalTo(expectedNestedFields)); + }); + } + + @SuppressWarnings("unchecked") + public void testPreviewTransformWithPipeline() throws Exception { + String pipelineId = "my-preview-pivot-pipeline"; + int pipelineValue = 42; + Request pipelineRequest = new Request("PUT", "/_ingest/pipeline/" + pipelineId); + pipelineRequest.setJsonEntity("{\n" + " \"description\" : \"my pivot preview pipeline\",\n" + " \"processors\" : [\n" + " {\n" + " \"set\" : {\n" + " \"field\": \"pipeline_field\",\n" + " \"value\": " + pipelineValue + " }\n" + " }\n" + " ]\n" + "}"); + client().performRequest(pipelineRequest); + + setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME); + final Request createPreviewRequest = createRequestWithAuth("POST", DATAFRAME_ENDPOINT + "_preview", null); + + String config = "{ \"source\": {\"index\":\"" + REVIEWS_INDEX_NAME + "\"} ," + "\"dest\": {\"pipeline\": \"" + pipelineId + "\"}," + " \"pivot\": {" + " \"group_by\": {" + " \"user.id\": {\"terms\": { \"field\": \"user_id\" }}," + " \"by_day\": {\"date_histogram\": {\"fixed_interval\": \"1d\",\"field\":\"timestamp\",\"format\":\"yyyy-MM-dd\"}}}," + " \"aggregations\": {" + " \"user.avg_rating\": {" + " \"avg\": {" + " \"field\": \"stars\"" + " } } } }" + "}"; + createPreviewRequest.setJsonEntity(config); + + Map<String, Object> previewDataframeResponse = entityAsMap(client().performRequest(createPreviewRequest)); + List<Map<String, Object>> preview = (List<Map<String, Object>>)previewDataframeResponse.get("preview"); + // preview is limited to 100 + assertThat(preview.size(), equalTo(100)); + Set<String> expectedTopLevelFields = new HashSet<>(Arrays.asList("user", "by_day", "pipeline_field")); + Set<String> expectedNestedFields = new HashSet<>(Arrays.asList("id", "avg_rating")); + preview.forEach(p -> { + Set<String> keys = p.keySet(); + assertThat(keys, equalTo(expectedTopLevelFields)); + assertThat(p.get("pipeline_field"), equalTo(pipelineValue)); Map<String, Object> nestedObj = (Map<String, Object>)p.get("user"); keys = nestedObj.keySet(); assertThat(keys, equalTo(expectedNestedFields)); }); } diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java index bb82b6a040478..30435a8490328 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java @@ -147,12 +147,23 @@ protected void createPivotReviewsTransform(String transformId, String dataFrameI createPivotReviewsTransform(transformId, dataFrameIndex, query, null); } - protected void
createPivotReviewsTransform(String transformId, String dataFrameIndex, String query, String authHeader) + protected void createPivotReviewsTransform(String transformId, String dataFrameIndex, String query, String pipeline) + throws IOException { + createPivotReviewsTransform(transformId, dataFrameIndex, query, pipeline, null); + } + + + protected void createPivotReviewsTransform(String transformId, String dataFrameIndex, String query, String pipeline, String authHeader) throws IOException { final Request createDataframeTransformRequest = createRequestWithAuth("PUT", DATAFRAME_ENDPOINT + transformId, authHeader); - String config = "{" - + " \"dest\": {\"index\":\"" + dataFrameIndex + "\"},"; + String config = "{"; + + if (pipeline != null) { + config += " \"dest\": {\"index\":\"" + dataFrameIndex + "\", \"pipeline\":\"" + pipeline + "\"},"; + } else { + config += " \"dest\": {\"index\":\"" + dataFrameIndex + "\"},"; + } if (query != null) { config += " \"source\": {\"index\":\"" + REVIEWS_INDEX_NAME + "\", \"query\":{" + query + "}},"; diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java index 7f320ff9aaf27..3efe8c4d012bb 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java @@ -119,7 +119,7 @@ protected void createReviewsIndex() throws Exception { public void testGetProgress() throws Exception { createReviewsIndex(); SourceConfig sourceConfig = new SourceConfig(REVIEWS_INDEX_NAME); - DestConfig destConfig = new DestConfig("unnecessary"); + DestConfig destConfig = new DestConfig("unnecessary", null); GroupConfig histgramGroupConfig = new GroupConfig(Collections.emptyMap(), Collections.singletonMap("every_50", new HistogramGroupSource("count", 50.0))); AggregatorFactories.Builder aggs = new AggregatorFactories.Builder(); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsAction.java index 00bc15b1db6c5..7d117b2bfb438 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsAction.java @@ -44,7 +44,7 @@ public TransportGetDataFrameTransformsAction(TransportService transportService, @Override protected void doExecute(Task task, Request request, ActionListener listener) { searchResources(request, ActionListener.wrap( - r -> listener.onResponse(new Response(r.results())), + r -> listener.onResponse(new Response(r.results(), r.count())), listener::onFailure )); } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java index d814714ab6653..3c5678d2d5dda 100644 --- 
a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java @@ -73,7 +73,7 @@ protected Response newResponse(Request request, List tasks, List allFailedNodeExceptions = new ArrayList<>(failedNodeExceptions); allFailedNodeExceptions.addAll(tasks.stream().flatMap(r -> r.getNodeFailures().stream()).collect(Collectors.toList())); - return new Response(responses, taskOperationFailures, allFailedNodeExceptions); + return new Response(responses, responses.size(), taskOperationFailures, allFailedNodeExceptions); } @Override @@ -83,36 +83,47 @@ protected void taskOperation(Request request, DataFrameTransformTask task, Actio String nodeId = state.nodes().getLocalNode().getId(); if (task.isCancelled() == false) { transformsCheckpointService.getCheckpointStats(task.getTransformId(), task.getCheckpoint(), task.getInProgressCheckpoint(), - ActionListener.wrap(checkpointStats -> { - listener.onResponse(new Response(Collections.singletonList( - new DataFrameTransformStateAndStats(task.getTransformId(), task.getState(), task.getStats(), checkpointStats)))); - }, e -> { - listener.onResponse(new Response( - Collections.singletonList(new DataFrameTransformStateAndStats(task.getTransformId(), task.getState(), - task.getStats(), DataFrameTransformCheckpointingInfo.EMPTY)), + ActionListener.wrap(checkpointStats -> listener.onResponse(new Response( + Collections.singletonList(new DataFrameTransformStateAndStats(task.getTransformId(), + task.getState(), + task.getStats(), + checkpointStats)), + 1L)), + e -> listener.onResponse(new Response( + Collections.singletonList(new DataFrameTransformStateAndStats(task.getTransformId(), + task.getState(), + task.getStats(), + DataFrameTransformCheckpointingInfo.EMPTY)), + 1L, Collections.emptyList(), - Collections.singletonList(new FailedNodeException(nodeId, "Failed to retrieve checkpointing info", e)))); - })); + Collections.singletonList(new FailedNodeException(nodeId, "Failed to retrieve checkpointing info", e)))) + )); } else { - listener.onResponse(new Response(Collections.emptyList())); + listener.onResponse(new Response(Collections.emptyList(), 0L)); } } @Override protected void doExecute(Task task, Request request, ActionListener finalListener) { dataFrameTransformsConfigManager.expandTransformIds(request.getId(), request.getPageParams(), ActionListener.wrap( - ids -> { - request.setExpandedIds(ids); - request.setNodes(DataFrameNodes.dataFrameTaskNodes(ids, clusterService.state())); + hitsAndIds -> { + request.setExpandedIds(hitsAndIds.v2()); + request.setNodes(DataFrameNodes.dataFrameTaskNodes(hitsAndIds.v2(), clusterService.state())); super.doExecute(task, request, ActionListener.wrap( - response -> collectStatsForTransformsWithoutTasks(request, response, finalListener), + response -> collectStatsForTransformsWithoutTasks(request, response, ActionListener.wrap( + finalResponse -> finalListener.onResponse(new Response(finalResponse.getTransformsStateAndStats(), + hitsAndIds.v1(), + finalResponse.getTaskFailures(), + finalResponse.getNodeFailures())), + finalListener::onFailure + )), finalListener::onFailure )); }, e -> { // If the index to search, or the individual config is not there, just return empty if (e instanceof ResourceNotFoundException) { - finalListener.onResponse(new Response(Collections.emptyList())); + finalListener.onResponse(new 
Response(Collections.emptyList(), 0L)); } else { finalListener.onFailure(e); } @@ -165,7 +176,10 @@ private void collectStatsForTransformsWithoutTasks(Request request, // it can easily become arbitrarily ordered based on which transforms don't have a task or stats docs allStateAndStats.sort(Comparator.comparing(DataFrameTransformStateAndStats::getId)); - listener.onResponse(new Response(allStateAndStats, response.getTaskFailures(), response.getNodeFailures())); + listener.onResponse(new Response(allStateAndStats, + allStateAndStats.size(), + response.getTaskFailures(), + response.getNodeFailures())); }, e -> { if (e instanceof IndexNotFoundException) { diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java index dde9edb37e55c..d9d4b45be332e 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java @@ -6,8 +6,13 @@ package org.elasticsearch.xpack.dataframe.action; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ingest.SimulatePipelineAction; +import org.elasticsearch.action.ingest.SimulatePipelineRequest; +import org.elasticsearch.action.ingest.SimulatePipelineResponse; import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; @@ -16,7 +21,14 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.rest.RestStatus; @@ -26,6 +38,7 @@ import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; import org.elasticsearch.xpack.core.dataframe.action.PreviewDataFrameTransformAction; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; @@ -34,15 +47,19 @@ import org.elasticsearch.xpack.dataframe.transforms.pivot.AggregationResultUtils; import org.elasticsearch.xpack.dataframe.transforms.pivot.Pivot; +import java.util.ArrayList; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.stream.Collectors; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.dataframe.transforms.DataFrameIndexer.COMPOSITE_AGGREGATION_NAME; public class TransportPreviewDataFrameTransformAction extends HandledTransportAction<PreviewDataFrameTransformAction.Request, PreviewDataFrameTransformAction.Response> { + private static final Logger logger = LogManager.getLogger(TransportPreviewDataFrameTransformAction.class); private static final int NUMBER_OF_PREVIEW_BUCKETS = 100; private final XPackLicenseState licenseState; private final Client client; @@ -87,13 +104,41 @@ protected void doExecute(Task task, Pivot pivot = new Pivot(config.getPivotConfig()); - getPreview(pivot, config.getSource(), ActionListener.wrap( - previewResponse -> listener.onResponse(new PreviewDataFrameTransformAction.Response(previewResponse)), - listener::onFailure + getPreview(pivot, + config.getSource(), + config.getDestination().getPipeline(), + config.getDestination().getIndex(), + ActionListener.wrap( + previewResponse -> listener.onResponse(new PreviewDataFrameTransformAction.Response(previewResponse)), + error -> { + logger.error("Failure gathering preview", error); + listener.onFailure(error); + } )); } - private void getPreview(Pivot pivot, SourceConfig source, ActionListener<List<Map<String, Object>>> listener) { + @SuppressWarnings("unchecked") + private void getPreview(Pivot pivot, + SourceConfig source, + String pipeline, + String dest, + ActionListener<List<Map<String, Object>>> listener) { + ActionListener<SimulatePipelineResponse> pipelineResponseActionListener = ActionListener.wrap( + simulatePipelineResponse -> { + List<Map<String, Object>> response = new ArrayList<>(simulatePipelineResponse.getResults().size()); + for(var simulateDocumentResult : simulatePipelineResponse.getResults()) { + try(XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()) { + XContentBuilder content = simulateDocumentResult.toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS); + Map<String, Object> tempMap = XContentHelper.convertToMap(BytesReference.bytes(content), + true, + XContentType.JSON).v2(); + response.add((Map<String, Object>)XContentMapValues.extractValue("doc._source", tempMap)); + } + } + listener.onResponse(response); + }, + listener::onFailure + ); pivot.deduceMappings(client, source, ActionListener.wrap( deducedMappings -> { ClientHelper.executeWithHeadersAsync(threadPool.getThreadContext().getHeaders(), @@ -103,17 +148,40 @@ private void getPreview(Pivot pivot, SourceConfig source, ActionListener<List<Map<String, Object>>> listener) { - try { final CompositeAggregation agg = r.getAggregations().get(COMPOSITE_AGGREGATION_NAME); DataFrameIndexerTransformStats stats = DataFrameIndexerTransformStats.withDefaultTransformId(); // remove all internal fields - List<Map<String, Object>> results = pivot.extractResults(agg, deducedMappings, stats) - .peek(record -> { - record.keySet().removeIf(k -> k.startsWith("_")); - }).collect(Collectors.toList()); - listener.onResponse(results); + if (pipeline == null) { + List<Map<String, Object>> results = pivot.extractResults(agg, deducedMappings, stats) + .peek(doc -> doc.keySet().removeIf(k -> k.startsWith("_"))) + .collect(Collectors.toList()); + listener.onResponse(results); + } else { + List<Map<String, Object>> results = pivot.extractResults(agg, deducedMappings, stats) + .map(doc -> { + Map<String, Object> src = new HashMap<>(); + String id = (String) doc.get(DataFrameField.DOCUMENT_ID_FIELD); + doc.keySet().removeIf(k -> k.startsWith("_")); + src.put("_source", doc); + src.put("_id", id); + src.put("_index", dest); + return src; + }).collect(Collectors.toList()); + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + builder.field("docs", results); + builder.endObject(); + var pipelineRequest = new SimulatePipelineRequest(BytesReference.bytes(builder), XContentType.JSON); + pipelineRequest.setId(pipeline);
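// For reference (explanatory note, not part of the patch): the simulate call
// assembled above is the transport-level equivalent of
//
//   POST /_ingest/pipeline/<pipeline>/_simulate
//   { "docs": [ { "_index": "<dest.index>", "_id": "<doc _id>", "_source": { ...pivoted fields... } }, ... ] }
//
// The SimulatePipelineResponse listener defined earlier round-trips each
// SimulateDocumentResult through XContent and keeps only "doc._source", so the
// preview shows the documents exactly as they would be indexed after the
// pipeline has run.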
+ ClientHelper.executeAsyncWithOrigin(client, + ClientHelper.DATA_FRAME_ORIGIN, + SimulatePipelineAction.INSTANCE, + pipelineRequest, + pipelineResponseActionListener); + } + } } catch (AggregationResultUtils.AggregationExtractionException extractionException) { listener.onFailure( new ElasticsearchStatusException(extractionException.getMessage(), RestStatus.BAD_REQUEST)); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java index 65a82db84b912..c3ebb4e5460b6 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java @@ -85,12 +85,12 @@ protected void doExecute(Task task, StopDataFrameTransformAction.Request request } dataFrameTransformsConfigManager.expandTransformIds(request.getId(), new PageParams(0, 10_000), ActionListener.wrap( - expandedIds -> { - request.setExpandedIds(new HashSet<>(expandedIds)); - request.setNodes(DataFrameNodes.dataFrameTaskNodes(expandedIds, clusterService.state())); - super.doExecute(task, request, finalListener); - }, - listener::onFailure + hitsAndIds -> { + request.setExpandedIds(new HashSet<>(hitsAndIds.v2())); + request.setNodes(DataFrameNodes.dataFrameTaskNodes(hitsAndIds.v2(), clusterService.state())); + super.doExecute(task, request, finalListener); + }, + listener::onFailure )); } } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManager.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManager.java index e3972384e4b2a..63184fefef861 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManager.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManager.java @@ -23,6 +23,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -196,13 +197,16 @@ public void getTransformConfiguration(String transformId, ActionListener> foundIdsListener) { + public void expandTransformIds(String transformIdsExpression, + PageParams pageParams, + ActionListener>> foundIdsListener) { String[] idTokens = ExpandedIdsMatcher.tokenizeExpression(transformIdsExpression); QueryBuilder queryBuilder = buildQueryFromTokenizedIds(idTokens, DataFrameTransformConfig.NAME); SearchRequest request = client.prepareSearch(DataFrameInternalIndex.INDEX_NAME) .addSort(DataFrameField.ID.getPreferredName(), SortOrder.ASC) .setFrom(pageParams.getFrom()) + .setTrackTotalHits(true) .setSize(pageParams.getSize()) .setQuery(queryBuilder) // We only care about the `id` field, small optimization @@ -214,6 +218,7 @@ public void expandTransformIds(String transformIdsExpression, PageParams pagePar executeAsyncWithOrigin(client.threadPool().getThreadContext(), DATA_FRAME_ORIGIN, request, ActionListener.wrap( 
searchResponse -> { + long totalHits = searchResponse.getHits().getTotalHits().value; List ids = new ArrayList<>(searchResponse.getHits().getHits().length); for (SearchHit hit : searchResponse.getHits().getHits()) { BytesReference source = hit.getSourceRef(); @@ -235,7 +240,7 @@ public void expandTransformIds(String transformIdsExpression, PageParams pagePar requiredMatches.unmatchedIdsString()))); return; } - foundIdsListener.onResponse(ids); + foundIdsListener.onResponse(new Tuple<>(totalHits, ids)); }, foundIdsListener::onFailure ), client::search); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java index f2fc71da7f059..bb23bc9878f4c 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java @@ -179,6 +179,9 @@ private Stream processBucketsToIndexRequests(CompositeAggregation } IndexRequest request = new IndexRequest(indexName).source(builder).id(id); + if (transformConfig.getDestination().getPipeline() != null) { + request.setPipeline(transformConfig.getDestination().getPipeline()); + } return request; }); } diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManagerTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManagerTests.java index 9c7af3efa5333..9b03e4502d155 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManagerTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManagerTests.java @@ -8,6 +8,7 @@ import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.xpack.core.action.util.PageParams; import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpoint; @@ -159,7 +160,7 @@ public void testExpandIds() throws Exception { transformsConfigManager.expandTransformIds(transformConfig1.getId(), PageParams.defaultParams(), listener), - Collections.singletonList("transform1_expand"), + new Tuple<>(1L, Collections.singletonList("transform1_expand")), null, null); @@ -168,7 +169,7 @@ public void testExpandIds() throws Exception { transformsConfigManager.expandTransformIds("transform1_expand,transform2_expand", PageParams.defaultParams(), listener), - Arrays.asList("transform1_expand", "transform2_expand"), + new Tuple<>(2L, Arrays.asList("transform1_expand", "transform2_expand")), null, null); @@ -177,7 +178,7 @@ public void testExpandIds() throws Exception { transformsConfigManager.expandTransformIds("transform1*,transform2_expand,transform3_expand", PageParams.defaultParams(), listener), - Arrays.asList("transform1_expand", "transform2_expand", "transform3_expand"), + new Tuple<>(3L, Arrays.asList("transform1_expand", "transform2_expand", "transform3_expand")), null, null); @@ -186,7 +187,7 @@ public void testExpandIds() throws Exception { transformsConfigManager.expandTransformIds("_all", PageParams.defaultParams(), listener), - 
Arrays.asList("transform1_expand", "transform2_expand", "transform3_expand"), + new Tuple<>(3L, Arrays.asList("transform1_expand", "transform2_expand", "transform3_expand")), null, null); @@ -195,7 +196,7 @@ public void testExpandIds() throws Exception { transformsConfigManager.expandTransformIds("_all", new PageParams(0, 1), listener), - Collections.singletonList("transform1_expand"), + new Tuple<>(3L, Collections.singletonList("transform1_expand")), null, null); @@ -204,7 +205,7 @@ public void testExpandIds() throws Exception { transformsConfigManager.expandTransformIds("_all", new PageParams(1, 2), listener), - Arrays.asList("transform2_expand", "transform3_expand"), + new Tuple<>(3L, Arrays.asList("transform2_expand", "transform3_expand")), null, null); @@ -213,7 +214,7 @@ public void testExpandIds() throws Exception { transformsConfigManager.expandTransformIds("unknown,unknown2", new PageParams(1, 2), listener), - (List)null, + (Tuple>)null, null, e -> { assertThat(e, instanceOf(ResourceNotFoundException.class)); diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleService.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleService.java index 2d09d0184f489..a159b9f965e2a 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleService.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleService.java @@ -29,7 +29,7 @@ import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; -import org.elasticsearch.xpack.core.indexlifecycle.ShrinkAction; +import org.elasticsearch.xpack.core.indexlifecycle.ShrinkStep; import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; @@ -45,7 +45,7 @@ public class IndexLifecycleService implements ClusterStateListener, ClusterStateApplier, SchedulerEngine.Listener, Closeable, LocalNodeMasterListener { private static final Logger logger = LogManager.getLogger(IndexLifecycleService.class); - private static final Set IGNORE_ACTIONS_MAINTENANCE_REQUESTED = Collections.singleton(ShrinkAction.NAME); + private static final Set IGNORE_STEPS_MAINTENANCE_REQUESTED = Collections.singleton(ShrinkStep.NAME); private volatile boolean isMaster = false; private volatile TimeValue pollInterval; @@ -115,15 +115,15 @@ public void onMaster() { StepKey stepKey = IndexLifecycleRunner.getCurrentStepKey(lifecycleState); if (OperationMode.STOPPING == currentMode) { - if (stepKey != null && IGNORE_ACTIONS_MAINTENANCE_REQUESTED.contains(stepKey.getAction())) { - logger.info("waiting to stop ILM because index [{}] with policy [{}] is currently in action [{}]", - idxMeta.getIndex().getName(), policyName, stepKey.getAction()); + if (stepKey != null && IGNORE_STEPS_MAINTENANCE_REQUESTED.contains(stepKey.getName())) { + logger.info("waiting to stop ILM because index [{}] with policy [{}] is currently in step [{}]", + idxMeta.getIndex().getName(), policyName, stepKey.getName()); lifecycleRunner.maybeRunAsyncAction(clusterState, idxMeta, policyName, stepKey); - // ILM is trying to stop, but this index is in a Shrink action (or other dangerous action) so we can't stop + // ILM is trying to stop, but this index is in a Shrink step (or other dangerous step) so we can't stop safeToStop = false; } else { - 
logger.info("skipping policy execution for index [{}] with policy [{}] because ILM is stopping", - idxMeta.getIndex().getName(), policyName); + logger.info("skipping policy execution of step [{}] for index [{}] with policy [{}] because ILM is stopping", + stepKey == null ? "n/a" : stepKey.getName(), idxMeta.getIndex().getName(), policyName); } } else { lifecycleRunner.maybeRunAsyncAction(clusterState, idxMeta, policyName, stepKey); @@ -249,19 +249,19 @@ void triggerPolicies(ClusterState clusterState, boolean fromClusterStateChange) StepKey stepKey = IndexLifecycleRunner.getCurrentStepKey(lifecycleState); if (OperationMode.STOPPING == currentMode) { - if (stepKey != null && IGNORE_ACTIONS_MAINTENANCE_REQUESTED.contains(stepKey.getAction())) { - logger.info("waiting to stop ILM because index [{}] with policy [{}] is currently in action [{}]", - idxMeta.getIndex().getName(), policyName, stepKey.getAction()); + if (stepKey != null && IGNORE_STEPS_MAINTENANCE_REQUESTED.contains(stepKey.getName())) { + logger.info("waiting to stop ILM because index [{}] with policy [{}] is currently in step [{}]", + idxMeta.getIndex().getName(), policyName, stepKey.getName()); if (fromClusterStateChange) { lifecycleRunner.runPolicyAfterStateChange(policyName, idxMeta); } else { lifecycleRunner.runPeriodicStep(policyName, idxMeta); } - // ILM is trying to stop, but this index is in a Shrink action (or other dangerous action) so we can't stop + // ILM is trying to stop, but this index is in a Shrink step (or other dangerous step) so we can't stop safeToStop = false; } else { - logger.info("skipping policy execution for index [{}] with policy [{}] because ILM is stopping", - idxMeta.getIndex().getName(), policyName); + logger.info("skipping policy execution of step [{}] for index [{}] with policy [{}] because ILM is stopping", + stepKey == null ? 
"n/a" : stepKey.getName(), idxMeta.getIndex().getName(), policyName); } } else { if (fromClusterStateChange) { diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleServiceTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleServiceTests.java index 67affe9f74ce0..3757c1cd5fb4f 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleServiceTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleServiceTests.java @@ -37,6 +37,7 @@ import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; import org.elasticsearch.xpack.core.indexlifecycle.Phase; import org.elasticsearch.xpack.core.indexlifecycle.ShrinkAction; +import org.elasticsearch.xpack.core.indexlifecycle.ShrinkStep; import org.elasticsearch.xpack.core.indexlifecycle.Step; import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; import org.junit.After; @@ -58,6 +59,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyString; +import static org.mockito.Matchers.eq; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -148,7 +150,7 @@ public void testStoppedModeSkip() { } public void testRequestedStopOnShrink() { - Step.StepKey mockShrinkStep = new Step.StepKey(randomAlphaOfLength(4), ShrinkAction.NAME, randomAlphaOfLength(5)); + Step.StepKey mockShrinkStep = new Step.StepKey(randomAlphaOfLength(4), ShrinkAction.NAME, ShrinkStep.NAME); String policyName = randomAlphaOfLengthBetween(1, 20); IndexLifecycleRunnerTests.MockClusterStateActionStep mockStep = new IndexLifecycleRunnerTests.MockClusterStateActionStep(mockShrinkStep, randomStepKey()); @@ -180,14 +182,67 @@ public void testRequestedStopOnShrink() { .build(); ClusterChangedEvent event = new ClusterChangedEvent("_source", currentState, ClusterState.EMPTY_STATE); - SetOnce executedShrink = new SetOnce<>(); + SetOnce changedOperationMode = new SetOnce<>(); doAnswer(invocationOnMock -> { - executedShrink.set(true); + changedOperationMode.set(true); return null; - }).when(clusterService).submitStateUpdateTask(anyString(), any(ExecuteStepsUpdateTask.class)); + }).when(clusterService).submitStateUpdateTask(eq("ilm_operation_mode_update"), any(OperationModeUpdateTask.class)); + indexLifecycleService.applyClusterState(event); + indexLifecycleService.triggerPolicies(currentState, true); + assertNull(changedOperationMode.get()); + } + + public void testRequestedStopInShrinkActionButNotShrinkStep() { + // test all the shrink action steps that ILM can be stopped during (basically all of them minus the actual shrink) + ShrinkAction action = new ShrinkAction(1); + action.toSteps(mock(Client.class), "warm", randomStepKey()).stream() + .map(sk -> sk.getKey().getName()) + .filter(name -> name.equals(ShrinkStep.NAME) == false) + .forEach(this::verifyCanStopWithStep); + } + + // Check that ILM can stop when in the shrink action on the provided step + private void verifyCanStopWithStep(String stoppableStep) { + Step.StepKey mockShrinkStep = new Step.StepKey(randomAlphaOfLength(4), ShrinkAction.NAME, stoppableStep); + String policyName = randomAlphaOfLengthBetween(1, 20); + IndexLifecycleRunnerTests.MockClusterStateActionStep mockStep = + new IndexLifecycleRunnerTests.MockClusterStateActionStep(mockShrinkStep, randomStepKey()); + MockAction mockAction = new 
MockAction(Collections.singletonList(mockStep)); + Phase phase = new Phase("phase", TimeValue.ZERO, Collections.singletonMap("action", mockAction)); + LifecyclePolicy policy = newTestLifecyclePolicy(policyName, Collections.singletonMap(phase.getName(), phase)); + SortedMap policyMap = new TreeMap<>(); + policyMap.put(policyName, new LifecyclePolicyMetadata(policy, Collections.emptyMap(), + randomNonNegativeLong(), randomNonNegativeLong())); + Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(mockShrinkStep.getPhase()); + lifecycleState.setAction(mockShrinkStep.getAction()); + lifecycleState.setStep(mockShrinkStep.getName()); + IndexMetaData indexMetadata = IndexMetaData.builder(index.getName()) + .settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME_SETTING.getKey(), policyName)) + .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap()) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + ImmutableOpenMap.Builder indices = ImmutableOpenMap. builder() + .fPut(index.getName(), indexMetadata); + MetaData metaData = MetaData.builder() + .putCustom(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(policyMap, OperationMode.STOPPING)) + .indices(indices.build()) + .persistentSettings(settings(Version.CURRENT).build()) + .build(); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .metaData(metaData) + .nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build()) + .build(); + + ClusterChangedEvent event = new ClusterChangedEvent("_source", currentState, ClusterState.EMPTY_STATE); + SetOnce changedOperationMode = new SetOnce<>(); + doAnswer(invocationOnMock -> { + changedOperationMode.set(true); + return null; + }).when(clusterService).submitStateUpdateTask(eq("ilm_operation_mode_update"), any(OperationModeUpdateTask.class)); indexLifecycleService.applyClusterState(event); indexLifecycleService.triggerPolicies(currentState, true); - assertTrue(executedShrink.get()); + assertTrue(changedOperationMode.get()); } public void testRequestedStopOnSafeAction() { @@ -236,7 +291,7 @@ public void testRequestedStopOnSafeAction() { assertThat(task.getOperationMode(), equalTo(OperationMode.STOPPED)); moveToMaintenance.set(true); return null; - }).when(clusterService).submitStateUpdateTask(anyString(), any(OperationModeUpdateTask.class)); + }).when(clusterService).submitStateUpdateTask(eq("ilm_operation_mode_update"), any(OperationModeUpdateTask.class)); indexLifecycleService.applyClusterState(event); indexLifecycleService.triggerPolicies(currentState, randomBoolean()); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index 79f33be54ab22..a675f4a2e1a12 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -801,8 +801,8 @@ static long machineMemoryFromStats(OsStats stats) { if (containerLimitStr != null) { BigInteger containerLimit = new BigInteger(containerLimitStr); if ((containerLimit.compareTo(BigInteger.valueOf(mem)) < 0 && containerLimit.compareTo(BigInteger.ZERO) > 0) - // mem < 0 means the value couldn't be obtained for some reason - || (mem < 0 && 
containerLimit.compareTo(BigInteger.valueOf(Long.MAX_VALUE)) < 0)) { + // mem <= 0 means the value couldn't be obtained for some reason + || (mem <= 0 && containerLimit.compareTo(BigInteger.valueOf(Long.MAX_VALUE)) < 0)) { mem = containerLimit.longValue(); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/license/MachineLearningLicensingTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/license/MachineLearningLicensingTests.java index ae65ee1a06c46..19da0d1a682e5 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/license/MachineLearningLicensingTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/license/MachineLearningLicensingTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.license.License.OperationMode; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.ml.action.CloseJobAction; import org.elasticsearch.xpack.core.ml.action.DeleteDatafeedAction; @@ -38,7 +37,6 @@ import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.is; -@TestLogging("org.elasticsearch.xpack.ml.action:DEBUG") public class MachineLearningLicensingTests extends BaseMlIntegTestCase { @Before diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningTests.java index 9504cbe7a7011..e83fcdb5af6fb 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningTests.java @@ -97,8 +97,8 @@ public void testNoAttributes_givenClash() { public void testMachineMemory_givenStatsFailure() throws IOException { OsStats stats = mock(OsStats.class); - when(stats.getMem()).thenReturn(new OsStats.Mem(-1, -1)); - assertEquals(-1L, MachineLearning.machineMemoryFromStats(stats)); + when(stats.getMem()).thenReturn(new OsStats.Mem(0, 0)); + assertEquals(0L, MachineLearning.machineMemoryFromStats(stats)); } public void testMachineMemory_givenNoCgroup() throws IOException { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java index 40249c0bc771e..b06b83dc3d144 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java @@ -25,7 +25,7 @@ import org.elasticsearch.persistent.PersistentTasksClusterService; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; -import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.xpack.core.action.util.QueryPage; import org.elasticsearch.xpack.core.ml.action.CloseJobAction; import org.elasticsearch.xpack.core.ml.action.GetDatafeedsStatsAction; import org.elasticsearch.xpack.core.ml.action.GetDatafeedsStatsAction.Response.DatafeedStats; @@ -36,7 +36,6 @@ import org.elasticsearch.xpack.core.ml.action.PutJobAction; import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; import org.elasticsearch.xpack.core.ml.action.StopDatafeedAction; -import 
org.elasticsearch.xpack.core.action.util.QueryPage; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; import org.elasticsearch.xpack.core.ml.job.config.Job; @@ -179,7 +178,6 @@ public void testCloseUnassignedJobAndDatafeed() throws Exception { assertTrue(closeJobResponse.isClosed()); } - @TestLogging("org.elasticsearch.xpack.ml.action:TRACE,org.elasticsearch.xpack.ml.process:TRACE") public void testJobRelocationIsMemoryAware() throws Exception { internalCluster().ensureAtLeastNumDataNodes(1); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/NetworkDisruptionIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/NetworkDisruptionIT.java index e05263014d33a..8a257baa3d628 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/NetworkDisruptionIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/NetworkDisruptionIT.java @@ -38,12 +38,6 @@ protected Collection> nodePlugins() { return plugins; } - // Remove this once the AwaitsFix below has been resolved - public void testDummy() { - assertTrue(true); - } - - @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/39858") public void testJobRelocation() throws Exception { internalCluster().ensureAtLeastNumDataNodes(5); ensureStableCluster(5); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java index 5f1ce1ddf7d0e..519a81fe7d0a3 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java @@ -24,7 +24,6 @@ import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; import org.elasticsearch.xpack.core.ml.job.config.DataDescription; @@ -335,9 +334,6 @@ public void testCloseJob() { assertEquals(0, manager.numberOfOpenJobs()); } - // DEBUG logging makes it possible to see exactly how the two threads - // interleaved in the AutodetectProcessManager.close() call - @TestLogging("org.elasticsearch.xpack.ml.job.process.autodetect:DEBUG") public void testCanCloseClosingJob() throws Exception { AtomicInteger numberOfCommunicatorCloses = new AtomicInteger(0); doAnswer(invocationOnMock -> { diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsMonitoringDocTests.java index 4296bedfe0da3..2906adac892b0 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsMonitoringDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsMonitoringDocTests.java @@ -329,7 +329,7 @@ private static NodeStats mockNodeStats() { final OsStats.Cgroup osCgroup = new OsStats.Cgroup("_cpu_acct_ctrl_group", ++iota, "_cpu_ctrl_group", ++iota, ++iota, 
osCpuStat, "_memory_ctrl_group", "2000000000", "1000000000"); - final OsStats.Mem osMem = new OsStats.Mem(no, no); + final OsStats.Mem osMem = new OsStats.Mem(0, 0); final OsStats.Swap osSwap = new OsStats.Swap(no, no); final OsStats os = new OsStats(no, osCpu, osMem, osSwap, osCgroup); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/license/LicensingTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/license/LicensingTests.java index f5ad8b4f07ffb..2e6add5c4d943 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/license/LicensingTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/license/LicensingTests.java @@ -31,7 +31,6 @@ import org.elasticsearch.test.SecurityIntegTestCase; import org.elasticsearch.test.SecuritySettingsSource; import org.elasticsearch.test.SecuritySettingsSourceField; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.transport.Netty4Plugin; import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; @@ -56,8 +55,6 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; -@TestLogging("org.elasticsearch.cluster.service:TRACE,org.elasticsearch.action.search:TRACE," + - "org.elasticsearch.search:TRACE") public class LicensingTests extends SecurityIntegTestCase { private static final String ROLES = SecuritySettingsSource.TEST_ROLE + ":\n" + diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java index bfc7508857120..59bd9f04fedf4 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java @@ -31,7 +31,6 @@ import org.elasticsearch.test.SecurityIntegTestCase; import org.elasticsearch.test.SecuritySettingsSource; import org.elasticsearch.test.SecuritySettingsSourceField; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.security.authc.TokenMetaData; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; @@ -58,7 +57,6 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -@TestLogging("org.elasticsearch.xpack.security.authz.store.FileRolesStore:DEBUG") public class TokenAuthIntegTests extends SecurityIntegTestCase { @Override @@ -135,7 +133,6 @@ public void testTokenServiceCanRotateKeys() throws Exception { } } - @TestLogging("org.elasticsearch.xpack.security.authc:DEBUG") public void testExpiredTokensDeletedAfterExpiration() throws Exception { final RestHighLevelClient restClient = new TestRestHighLevelClient(); CreateTokenResponse response = restClient.security().createToken(CreateTokenRequest.passwordGrant( diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryLoadBalancingTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryLoadBalancingTests.java index 2614a1dc3d923..ff080be728db9 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryLoadBalancingTests.java +++ 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryLoadBalancingTests.java @@ -20,7 +20,6 @@ import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.mocksocket.MockServerSocket; import org.elasticsearch.mocksocket.MockSocket; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.common.socket.SocketAccess; @@ -53,7 +52,6 @@ /** * Tests that the server sets properly load balance connections without throwing exceptions */ -@TestLogging("org.elasticsearch.xpack.security.authc.ldap.support:DEBUG") public class SessionFactoryLoadBalancingTests extends LdapTestCase { private ThreadPool threadPool; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java index d50663b9d7cab..29a9db2a9ea2b 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java @@ -36,7 +36,6 @@ import org.elasticsearch.search.SearchHits; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpClient; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.xpack.core.security.action.role.ClearRolesCacheRequest; import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilegeDescriptor; import org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames; @@ -72,7 +71,6 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -@TestLogging("org.elasticsearch.xpack.security.authz.store.NativePrivilegeStore:TRACE") public class NativePrivilegeStoreTests extends ESTestCase { private NativePrivilegeStore store; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java index 5d9c24ec91415..c4b75f1e73d38 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java @@ -15,7 +15,6 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.SecurityIntegTestCase; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.transport.Transport; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xpack.core.ssl.CertParsingUtils; @@ -50,7 +49,6 @@ * @see RestrictedTrustManager */ @ESIntegTestCase.ClusterScope(numDataNodes = 1, numClientNodes = 0, supportsDedicatedMasters = false) -@TestLogging("org.elasticsearch.xpack.ssl.RestrictedTrustManager:DEBUG") public class SSLTrustRestrictionsTests extends SecurityIntegTestCase { private static final TimeValue MAX_WAIT_RELOAD = TimeValue.timeValueSeconds(1); diff --git a/x-pack/plugin/sql/qa/build.gradle b/x-pack/plugin/sql/qa/build.gradle index 14bc1faa3fa43..89f04562fffb2 100644 --- a/x-pack/plugin/sql/qa/build.gradle +++ b/x-pack/plugin/sql/qa/build.gradle @@ -1,5 +1,3 @@ -import 
org.elasticsearch.gradle.test.RunTask - description = 'Integration tests for SQL' apply plugin: 'elasticsearch.build' archivesBaseName = 'qa-sql' @@ -96,20 +94,20 @@ subprojects { if (project.name != 'security') { // The security project just configures its subprojects + apply plugin: 'elasticsearch.testclusters' apply plugin: 'elasticsearch.rest-test' - integTestCluster { + testClusters.integTest { + distribution = 'DEFAULT' setting 'xpack.monitoring.enabled', 'false' setting 'xpack.ml.enabled', 'false' setting 'xpack.watcher.enabled', 'false' - setting 'script.max_compilations_rate', '1000/1m' } - task runqa(type: RunTask) { - setting 'xpack.monitoring.enabled', 'false' - setting 'xpack.ml.enabled', 'false' - setting 'xpack.watcher.enabled', 'false' - setting 'script.max_compilations_rate', '1000/1m' + task runqa { + doFirst { + println "Run with `-Dtestclusters.inspect.failure=true integTest` to leave the cluster running after failure" + } } } } diff --git a/x-pack/plugin/sql/qa/multi-node/build.gradle b/x-pack/plugin/sql/qa/multi-node/build.gradle index ce7616ad1d1ae..7949f36083a07 100644 --- a/x-pack/plugin/sql/qa/multi-node/build.gradle +++ b/x-pack/plugin/sql/qa/multi-node/build.gradle @@ -6,8 +6,8 @@ description = 'Run a subset of SQL tests against multiple nodes' * feel should need to be tested against more than one node. */ -integTestCluster { - numNodes = 2 +testClusters.integTest{ + numberOfNodes = 2 setting 'xpack.security.enabled', 'false' setting 'xpack.license.self_generated.type', 'trial' } diff --git a/x-pack/plugin/sql/qa/security/build.gradle b/x-pack/plugin/sql/qa/security/build.gradle index 33a4963c10376..2cf410ed3d908 100644 --- a/x-pack/plugin/sql/qa/security/build.gradle +++ b/x-pack/plugin/sql/qa/security/build.gradle @@ -29,38 +29,23 @@ subprojects { testCompile project(":x-pack:plugin:core") } - integTestCluster { + testClusters.integTest { + distribution = "DEFAULT" // Setup auditing so we can use it in some tests setting 'xpack.security.audit.enabled', 'true' setting 'xpack.security.enabled', 'true' setting 'xpack.license.self_generated.type', 'trial' // Setup roles used by tests - extraConfigFile 'roles.yml', '../roles.yml' + extraConfigFile 'roles.yml', mainProject.file('roles.yml') /* Setup the one admin user that we run the tests as. * Tests use "run as" to get different users. */ - setupCommand 'setupUser#test_admin', - 'bin/elasticsearch-users', 'useradd', 'test_admin', '-p', 'x-pack-test-password', '-r', 'superuser' - // Subprojects override the wait condition to work properly with security + user username: "test_admin", password: "x-pack-test-password" } - integTestRunner { - def today = new Date().format('yyyy-MM-dd') + integTest.runner { nonInputProperties.systemProperty 'tests.audit.logfile', - "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_audit.json" + "${ -> testClusters.integTest.singleNode().getAuditLog()}" nonInputProperties.systemProperty 'tests.audit.yesterday.logfile', - "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_audit-${today}.json" - } - - runqa { - // Setup auditing so we can use it in some tests - setting 'xpack.security.audit.enabled', 'true' - setting 'xpack.security.enabled', 'true' - setting 'xpack.license.self_generated.type', 'trial' - // Setup roles used by tests - extraConfigFile 'roles.yml', '../roles.yml' - /* Setup the one admin user that we run the tests as. - * Tests use "run as" to get different users. 
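The `integTest.runner` block above no longer hard-codes the audit log location: `tests.audit.logfile` is resolved lazily from the cluster's `getAuditLog()` file, and yesterday's rollover is rebuilt from the same directory plus a `yyyy-MM-dd` suffix. A minimal Java sketch of that date-suffix derivation, assuming the dated rollovers sit next to the live log (the class name and path are illustrative only):

import java.io.File;
import java.time.LocalDate;
import java.time.format.DateTimeFormatter;

public final class AuditLogPaths {
    // Rollover pattern taken from the build script above: "<base>-<yyyy-MM-dd>.json".
    private static final DateTimeFormatter DATE = DateTimeFormatter.ofPattern("yyyy-MM-dd");

    // Given the live audit log (e.g. logs/integTest_audit.json), return the dated
    // sibling for the requested day (e.g. logs/integTest_audit-2019-04-30.json).
    static File datedAuditLog(File auditLog, LocalDate day) {
        String base = auditLog.getName().replace(".json", "");
        return new File(auditLog.getParentFile(), base + "-" + DATE.format(day) + ".json");
    }

    public static void main(String[] args) {
        File live = new File("logs/integTest_audit.json"); // illustrative path
        System.out.println(datedAuditLog(live, LocalDate.now().minusDays(1)));
    }
}
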
*/ - setupCommand 'setupUser#test_admin', - 'bin/elasticsearch-users', 'useradd', 'test_admin', '-p', 'x-pack-test-password', '-r', 'superuser' + "${ -> testClusters.integTest.singleNode().getAuditLog().getParentFile()}/integTest_audit-${new Date().format('yyyy-MM-dd')}.json" } } diff --git a/x-pack/plugin/sql/qa/security/with-ssl/build.gradle b/x-pack/plugin/sql/qa/security/with-ssl/build.gradle index 19459bade97a8..d73a181bef10f 100644 --- a/x-pack/plugin/sql/qa/security/with-ssl/build.gradle +++ b/x-pack/plugin/sql/qa/security/with-ssl/build.gradle @@ -11,7 +11,7 @@ import java.security.KeyStore import java.security.SecureRandom // Tell the tests we're running with ssl enabled -integTestRunner { +integTest.runner { systemProperty 'tests.ssl.enabled', 'true' } @@ -143,78 +143,32 @@ forbiddenPatterns { sourceSets.test.resources.srcDir(keystoreDir) processTestResources.dependsOn(importNodeCertificateInClientKeyStore, importClientCertificateInNodeKeyStore) -integTestCluster.dependsOn(importClientCertificateInNodeKeyStore) +integTest.runner { + dependsOn(importClientCertificateInNodeKeyStore) + onlyIf { + // Do not attempt to form a cluster in a FIPS JVM, as doing so with a JKS keystore will fail. + // TODO Revisit this when SQL CLI client can handle key/certificate instead of only Keystores. + // https://github.com/elastic/elasticsearch/issues/32306 + project.inFipsJvm == false + } +} -integTestCluster { +testClusters.integTest { // The setup that we actually want + setting 'xpack.license.self_generated.type', 'trial' setting 'xpack.security.http.ssl.enabled', 'true' setting 'xpack.security.transport.ssl.enabled', 'true' // ceremony to set up ssl setting 'xpack.security.transport.ssl.keystore.path', 'test-node.jks' - keystoreSetting 'xpack.security.transport.ssl.keystore.secure_password', 'keypass' setting 'xpack.security.http.ssl.keystore.path', 'test-node.jks' - keystoreSetting 'xpack.security.http.ssl.keystore.secure_password', 'keypass' + keystore 'xpack.security.transport.ssl.keystore.secure_password', 'keypass' + keystore 'xpack.security.http.ssl.keystore.secure_password', 'keypass' - setting 'xpack.license.self_generated.type', 'trial' // copy keystores into config/ extraConfigFile nodeKeystore.name, nodeKeystore extraConfigFile clientKeyStore.name, clientKeyStore - - // Override the wait condition to work properly with security and SSL - waitCondition = { NodeInfo node, AntBuilder ant -> - File tmpFile = new File(node.cwd, 'wait.success') - KeyStore keyStore = KeyStore.getInstance("JKS"); - keyStore.load(clientKeyStore.newInputStream(), 'keypass'.toCharArray()); - KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); - kmf.init(keyStore, 'keypass'.toCharArray()); - TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); - tmf.init(keyStore); - SSLContext sslContext = SSLContext.getInstance("TLSv1.2"); - sslContext.init(kmf.getKeyManagers(), tmf.getTrustManagers(), new SecureRandom()); - for (int i = 0; i < 10; i++) { - // we use custom wait logic here for HTTPS - HttpsURLConnection httpURLConnection = null; - try { - httpURLConnection = (HttpsURLConnection) new URL("https://${node.httpUri()}/_cluster/health?wait_for_nodes=${numNodes}&wait_for_status=yellow").openConnection(); - httpURLConnection.setSSLSocketFactory(sslContext.getSocketFactory()); - httpURLConnection.setRequestProperty("Authorization", "Basic " +
Base64.getEncoder().encodeToString("test_admin:x-pack-test-password".getBytes(StandardCharsets.UTF_8))); - httpURLConnection.setRequestMethod("GET"); - httpURLConnection.connect(); - if (httpURLConnection.getResponseCode() == 200) { - tmpFile.withWriter StandardCharsets.UTF_8.name(), { - it.write(httpURLConnection.getInputStream().getText(StandardCharsets.UTF_8.name())) - } - } - } catch (IOException e) { - if (i == 9) { - logger.error("final attempt of calling cluster health failed", e) - } else { - logger.debug("failed to call cluster health", e) - } - } finally { - if (httpURLConnection != null) { - httpURLConnection.disconnect(); - } - } - - // did not start, so wait a bit before trying again - Thread.sleep(500L); - } - - return tmpFile.exists() - } -} - -// Do not attempt to form a cluster in a FIPS JVM, as doing so with a JKS keystore will fail. -// TODO Revisit this when SQL CLI client can handle key/certificate instead of only Keystores. -// https://github.com/elastic/elasticsearch/issues/32306 -tasks.matching { it.name in ["integTestCluster#init", "integTestCluster#start", "integTestCluster#wait", "integTestRunner"] }.all { - onlyIf { - project.inFipsJvm == false - } } diff --git a/x-pack/plugin/sql/qa/security/without-ssl/build.gradle b/x-pack/plugin/sql/qa/security/without-ssl/build.gradle index ac748527da371..691fc1a631f16 100644 --- a/x-pack/plugin/sql/qa/security/without-ssl/build.gradle +++ b/x-pack/plugin/sql/qa/security/without-ssl/build.gradle @@ -1,17 +1,7 @@ -integTestRunner { +integTest.runner { systemProperty 'tests.ssl.enabled', 'false' } -integTestCluster { +testClusters.integTest { setting 'xpack.license.self_generated.type', 'trial' - waitCondition = { node, ant -> - File tmpFile = new File(node.cwd, 'wait.success') - ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", - dest: tmpFile.toString(), - username: 'test_admin', - password: 'x-pack-test-password', - ignoreerrors: true, - retries: 10) - return tmpFile.exists() - } } diff --git a/x-pack/plugin/sql/qa/single-node/build.gradle b/x-pack/plugin/sql/qa/single-node/build.gradle index 3a8b0ffde0ac5..137ffed68ad52 100644 --- a/x-pack/plugin/sql/qa/single-node/build.gradle +++ b/x-pack/plugin/sql/qa/single-node/build.gradle @@ -1,9 +1,4 @@ -integTestCluster { - setting 'xpack.security.enabled', 'false' - setting 'xpack.license.self_generated.type', 'trial' -} - -runqa { +testClusters.integTest { setting 'xpack.security.enabled', 'false' setting 'xpack.license.self_generated.type', 'trial' } diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DebugSqlSpec.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DebugSqlSpec.java index b51d66ace2e26..580fd9a28648b 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DebugSqlSpec.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DebugSqlSpec.java @@ -7,11 +7,8 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import org.elasticsearch.test.junit.annotations.TestLogging; - import java.util.List; -@TestLogging(JdbcTestUtils.SQL_TRACE) public abstract class DebugSqlSpec extends SqlSpecTestCase { @ParametersFactory(shuffle = false, argumentFormatting = PARAM_FORMATTING) public static List readScriptSpec() throws Exception { @@ -27,4 +24,4 @@ public DebugSqlSpec(String fileName, String groupName, String testName, Integer protected boolean logEsResultSet() { return true; } -} 
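Both hand-rolled wait conditions deleted above (the `ant.get` poll in `without-ssl` and the `HttpsURLConnection` loop in `with-ssl`) now fall to the testclusters framework. For reference, the essence of that HTTPS probe fits in a short self-contained Java helper; the URL, keystore path and credentials are parameters here, and only the flow (trust the node's JKS keystore, poll `_cluster/health` with Basic auth) mirrors the deleted script:

import java.io.FileInputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.security.KeyStore;
import java.util.Base64;
import javax.net.ssl.HttpsURLConnection;
import javax.net.ssl.SSLContext;
import javax.net.ssl.TrustManagerFactory;

public final class HttpsHealthProbe {
    // One probe: returns true when the cluster answers 200 on the health endpoint.
    // A caller would retry this in a loop with a short sleep, as the old script did.
    static boolean healthOk(String url, String jksPath, char[] storePass,
                            String user, String password) throws Exception {
        KeyStore keyStore = KeyStore.getInstance("JKS");
        try (FileInputStream in = new FileInputStream(jksPath)) {
            keyStore.load(in, storePass);
        }
        TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
        tmf.init(keyStore);
        SSLContext ctx = SSLContext.getInstance("TLSv1.2");
        ctx.init(null, tmf.getTrustManagers(), null);
        HttpsURLConnection conn = (HttpsURLConnection) new URL(url).openConnection();
        conn.setSSLSocketFactory(ctx.getSocketFactory());
        conn.setRequestProperty("Authorization", "Basic " + Base64.getEncoder()
                .encodeToString((user + ":" + password).getBytes(StandardCharsets.UTF_8)));
        try {
            return conn.getResponseCode() == HttpURLConnection.HTTP_OK;
        } finally {
            conn.disconnect();
        }
    }
}

A call would look like healthOk("https://localhost:9200/_cluster/health?wait_for_status=yellow", "test-node.jks", "keypass".toCharArray(), "test_admin", "x-pack-test-password") -- all values taken from the build scripts above except the host, which is a placeholder.
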
\ No newline at end of file +} diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java index 96d8ffe455c6e..7c7288d6a3539 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java @@ -621,46 +621,7 @@ public void testBasicQueryText() throws IOException { } public void testNextPageText() throws IOException { - int size = 20; - String[] docs = new String[size]; - for (int i = 0; i < size; i++) { - docs[i] = "{\"text\":\"text" + i + "\", \"number\":" + i + "}\n"; - } - index(docs); - - String request = "{\"query\":\"SELECT text, number, number + 5 AS sum FROM test ORDER BY number\", \"fetch_size\":2}"; - - String cursor = null; - for (int i = 0; i < 20; i += 2) { - Tuple response; - if (i == 0) { - response = runSqlAsText(StringUtils.EMPTY, new StringEntity(request, ContentType.APPLICATION_JSON), "text/plain"); - } else { - response = runSqlAsText(StringUtils.EMPTY, new StringEntity("{\"cursor\":\"" + cursor + "\"}", - ContentType.APPLICATION_JSON), "text/plain"); - } - - StringBuilder expected = new StringBuilder(); - if (i == 0) { - expected.append(" text | number | sum \n"); - expected.append("---------------+---------------+---------------\n"); - } - expected.append(String.format(Locale.ROOT, "%-15s|%-15d|%-15d\n", "text" + i, i, i + 5)); - expected.append(String.format(Locale.ROOT, "%-15s|%-15d|%-15d\n", "text" + (i + 1), i + 1, i + 6)); - cursor = response.v2(); - assertEquals(expected.toString(), response.v1()); - assertNotNull(cursor); - } - Map expected = new HashMap<>(); - expected.put("rows", emptyList()); - assertResponse(expected, runSql(new StringEntity("{\"cursor\":\"" + cursor + "\"}", ContentType.APPLICATION_JSON), - StringUtils.EMPTY)); - - Map response = runSql(new StringEntity("{\"cursor\":\"" + cursor + "\"}", ContentType.APPLICATION_JSON), - "/close"); - assertEquals(true, response.get("succeeded")); - - assertEquals(0, getNumberOfSearchContexts("test")); + executeQueryWithNextPage("text/plain", " text | number | sum \n", "%-15s|%-15d|%-15d\n"); } // CSV/TSV tests @@ -702,6 +663,10 @@ public void testQueryWithoutHeaderInCSV() throws IOException { Tuple response = runSqlAsText(query, "text/csv; header=absent"); assertEquals(expected, response.v1()); } + + public void testNextPageCSV() throws IOException { + executeQueryWithNextPage("text/csv; header=present", "text,number,sum\r\n", "%s,%d,%d\r\n"); + } public void testQueryInTSV() throws IOException { index("{\"name\":" + toJson("first") + ", \"number\" : 1 }", @@ -720,6 +685,55 @@ public void testQueryInTSV() throws IOException { response = runSqlAsTextFormat(query, "tsv"); assertEquals(expected, response.v1()); } + + public void testNextPageTSV() throws IOException { + executeQueryWithNextPage("text/tab-separated-values", "text\tnumber\tsum\n", "%s\t%d\t%d\n"); + } + + private void executeQueryWithNextPage(String format, String expectedHeader, String expectedLineFormat) throws IOException { + int size = 20; + String[] docs = new String[size]; + for (int i = 0; i < size; i++) { + docs[i] = "{\"text\":\"text" + i + "\", \"number\":" + i + "}\n"; + } + index(docs); + + String request = "{\"query\":\"SELECT text, number, number + 5 AS sum FROM test ORDER BY number\", \"fetch_size\":2}"; + + String cursor = null; + for (int i = 0; i < 20; i 
+= 2) { + Tuple response; + if (i == 0) { + response = runSqlAsText(StringUtils.EMPTY, new StringEntity(request, ContentType.APPLICATION_JSON), format); + } else { + response = runSqlAsText(StringUtils.EMPTY, new StringEntity("{\"cursor\":\"" + cursor + "\"}", + ContentType.APPLICATION_JSON), format); + } + + StringBuilder expected = new StringBuilder(); + if (i == 0) { + expected.append(expectedHeader); + if ("text/plain".equals(format)) { + expected.append("---------------+---------------+---------------\n"); + } + } + expected.append(String.format(Locale.ROOT, expectedLineFormat, "text" + i, i, i + 5)); + expected.append(String.format(Locale.ROOT, expectedLineFormat, "text" + (i + 1), i + 1, i + 6)); + cursor = response.v2(); + assertEquals(expected.toString(), response.v1()); + assertNotNull(cursor); + } + Map expected = new HashMap<>(); + expected.put("rows", emptyList()); + assertResponse(expected, runSql(new StringEntity("{\"cursor\":\"" + cursor + "\"}", ContentType.APPLICATION_JSON), + StringUtils.EMPTY)); + + Map response = runSql(new StringEntity("{\"cursor\":\"" + cursor + "\"}", ContentType.APPLICATION_JSON), + "/close"); + assertEquals(true, response.get("succeeded")); + + assertEquals(0, getNumberOfSearchContexts("test")); + } private Tuple runSqlAsText(String sql, String accept) throws IOException { return runSqlAsText(StringUtils.EMPTY, new StringEntity("{\"query\":\"" + sql + "\"}", ContentType.APPLICATION_JSON), accept); diff --git a/x-pack/plugin/sql/qa/src/main/resources/single-node-only/command-sys-geo.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/single-node-only/command-sys-geo.csv-spec index c9380fae2809e..6d165c33433df 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/single-node-only/command-sys-geo.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/single-node-only/command-sys-geo.csv-spec @@ -6,10 +6,10 @@ geoSysColumns SYS COLUMNS TABLE LIKE 'geo'; TABLE_CAT:s | TABLE_SCHEM:s| TABLE_NAME:s | COLUMN_NAME:s | DATA_TYPE:i | TYPE_NAME:s | COLUMN_SIZE:i|BUFFER_LENGTH:i|DECIMAL_DIGITS:i|NUM_PREC_RADIX:i| NULLABLE:i| REMARKS:s | COLUMN_DEF:s |SQL_DATA_TYPE:i|SQL_DATETIME_SUB:i|CHAR_OCTET_LENGTH:i|ORDINAL_POSITION:i|IS_NULLABLE:s|SCOPE_CATALOG:s|SCOPE_SCHEMA:s|SCOPE_TABLE:s|SOURCE_DATA_TYPE:sh|IS_AUTOINCREMENT:s|IS_GENERATEDCOLUMN:s -x-pack_plugin_sql_qa_single-node_integTestCluster|null |geo |city |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |1 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster|null |geo |location |114 |GEO_POINT |58 |16 |null |null |1 |null |null |114 |0 |null |2 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster|null |geo |location_no_dv |114 |GEO_POINT |58 |16 |null |null |1 |null |null |114 |0 |null |3 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster|null |geo |region |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |4 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster|null |geo |region_point |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |5 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster|null |geo |shape |114 |GEO_SHAPE |2147483647 |2147483647 |null |null |1 |null |null |114 |0 |null |6 |YES |null |null |null |null |NO |NO +integTest|null |geo |city |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |1 |YES |null |null |null |null |NO
|NO +integTest|null |geo |location |114 |GEO_POINT |58 |16 |null |null |1 |null |null |114 |0 |null |2 |YES |null |null |null |null |NO |NO +integTest|null |geo |location_no_dv |114 |GEO_POINT |58 |16 |null |null |1 |null |null |114 |0 |null |3 |YES |null |null |null |null |NO |NO +integTest|null |geo |region |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |4 |YES |null |null |null |null |NO |NO +integTest|null |geo |region_point |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |5 |YES |null |null |null |null |NO |NO +integTest|null |geo |shape |114 |GEO_SHAPE |2147483647 |2147483647 |null |null |1 |null |null |114 |0 |null |6 |YES |null |null |null |null |NO |NO ; \ No newline at end of file diff --git a/x-pack/plugin/sql/qa/src/main/resources/single-node-only/command-sys.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/single-node-only/command-sys.csv-spec index 1b13841c47273..6656c695ffe7b 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/single-node-only/command-sys.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/single-node-only/command-sys.csv-spec @@ -7,16 +7,16 @@ SYS COLUMNS TABLE LIKE 'test\_emp' ESCAPE '\'; TABLE_CAT:s | TABLE_SCHEM:s| TABLE_NAME:s | COLUMN_NAME:s | DATA_TYPE:i | TYPE_NAME:s | COLUMN_SIZE:i| BUFFER_LENGTH:i|DECIMAL_DIGITS:i|NUM_PREC_RADIX:i | NULLABLE:i| REMARKS:s | COLUMN_DEF:s |SQL_DATA_TYPE:i|SQL_DATETIME_SUB:i|CHAR_OCTET_LENGTH:i|ORDINAL_POSITION:i|IS_NULLABLE:s|SCOPE_CATALOG:s|SCOPE_SCHEMA:s|SCOPE_TABLE:s|SOURCE_DATA_TYPE:sh|IS_AUTOINCREMENT:s|IS_GENERATEDCOLUMN:s ---------------+---------------+---------------+--------------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+----------------+-----------------+----------------+---------------+---------------+---------------+---------------+----------------+----------------+------------------ -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |birth_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |1 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |emp_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |3 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |first_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |4 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |first_name.keyword|12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |5 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |6 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |hire_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |7 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |languages |-6 |BYTE |5 |1 |null |10 |1 |null |null |-6 |0 |null |8 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |last_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |9 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp 
|last_name.keyword |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |10 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |salary |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |11 |YES |null |null |null |null |NO |NO +integTest |null |test_emp |birth_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |1 |YES |null |null |null |null |NO |NO +integTest |null |test_emp |emp_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |3 |YES |null |null |null |null |NO |NO +integTest |null |test_emp |first_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |4 |YES |null |null |null |null |NO |NO +integTest |null |test_emp |first_name.keyword|12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |5 |YES |null |null |null |null |NO |NO +integTest |null |test_emp |gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |6 |YES |null |null |null |null |NO |NO +integTest |null |test_emp |hire_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |7 |YES |null |null |null |null |NO |NO +integTest |null |test_emp |languages |-6 |BYTE |5 |1 |null |10 |1 |null |null |-6 |0 |null |8 |YES |null |null |null |null |NO |NO +integTest |null |test_emp |last_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |9 |YES |null |null |null |null |NO |NO +integTest |null |test_emp |last_name.keyword |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |10 |YES |null |null |null |null |NO |NO +integTest |null |test_emp |salary |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |11 |YES |null |null |null |null |NO |NO ; sysColumnsWithTableLikeNoEscape @@ -27,39 +27,39 @@ SYS COLUMNS TABLE LIKE 'test_emp'; TABLE_CAT:s | TABLE_SCHEM:s| TABLE_NAME:s | COLUMN_NAME:s | DATA_TYPE:i | TYPE_NAME:s | COLUMN_SIZE:i| BUFFER_LENGTH:i|DECIMAL_DIGITS:i|NUM_PREC_RADIX:i | NULLABLE:i| REMARKS:s | COLUMN_DEF:s |SQL_DATA_TYPE:i|SQL_DATETIME_SUB:i|CHAR_OCTET_LENGTH:i|ORDINAL_POSITION:i|IS_NULLABLE:s|SCOPE_CATALOG:s|SCOPE_SCHEMA:s|SCOPE_TABLE:s|SOURCE_DATA_TYPE:sh|IS_AUTOINCREMENT:s|IS_GENERATEDCOLUMN:s ---------------+---------------+---------------+--------------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+----------------+-----------------+----------------+---------------+---------------+---------------+---------------+----------------+----------------+------------------ -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |birth_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |1 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |emp_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |3 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |extra.info.gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |6 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |extra_gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |7 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |extra_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |8 |YES |null 
|null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |first_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |9 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |first_name.keyword|12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |10 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |11 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |hire_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |12 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |languages |-6 |BYTE |5 |1 |null |10 |1 |null |null |-6 |0 |null |13 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |last_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |14 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |last_name.keyword |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |15 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |salary |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |16 |YES |null |null |null |null |NO |NO +integTest |null |test_emp |birth_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |1 |YES |null |null |null |null |NO |NO +integTest |null |test_emp |emp_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |3 |YES |null |null |null |null |NO |NO +integTest |null |test_emp |extra.info.gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |6 |YES |null |null |null |null |NO |NO +integTest |null |test_emp |extra_gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |7 |YES |null |null |null |null |NO |NO +integTest |null |test_emp |extra_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |8 |YES |null |null |null |null |NO |NO +integTest |null |test_emp |first_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |9 |YES |null |null |null |null |NO |NO +integTest |null |test_emp |first_name.keyword|12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |10 |YES |null |null |null |null |NO |NO +integTest |null |test_emp |gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |11 |YES |null |null |null |null |NO |NO +integTest |null |test_emp |hire_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |12 |YES |null |null |null |null |NO |NO +integTest |null |test_emp |languages |-6 |BYTE |5 |1 |null |10 |1 |null |null |-6 |0 |null |13 |YES |null |null |null |null |NO |NO +integTest |null |test_emp |last_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |14 |YES |null |null |null |null |NO |NO +integTest |null |test_emp |last_name.keyword |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |15 |YES |null |null |null |null |NO |NO +integTest |null |test_emp |salary |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |16 |YES |null |null |null |null |NO |NO ; sysColumnsWithCatalogAndLike -SYS COLUMNS CATALOG 
'x-pack_plugin_sql_qa_single-node_integTestCluster' TABLE LIKE 'test\_emp\_copy' ESCAPE '\'; +SYS COLUMNS CATALOG 'integTest' TABLE LIKE 'test\_emp\_copy' ESCAPE '\'; TABLE_CAT:s | TABLE_SCHEM:s| TABLE_NAME:s | COLUMN_NAME:s | DATA_TYPE:i | TYPE_NAME:s | COLUMN_SIZE:i| BUFFER_LENGTH:i|DECIMAL_DIGITS:i|NUM_PREC_RADIX:i | NULLABLE:i| REMARKS:s | COLUMN_DEF:s |SQL_DATA_TYPE:i|SQL_DATETIME_SUB:i|CHAR_OCTET_LENGTH:i|ORDINAL_POSITION:i|IS_NULLABLE:s|SCOPE_CATALOG:s|SCOPE_SCHEMA:s|SCOPE_TABLE:s|SOURCE_DATA_TYPE:sh|IS_AUTOINCREMENT:s|IS_GENERATEDCOLUMN:s ---------------+---------------+---------------+-------------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+----------------+-----------------+----------------+---------------+---------------+---------------+---------------+----------------+----------------+------------------ -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|birth_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |1 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|emp_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |3 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|extra.info.gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |6 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|extra_gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |7 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|extra_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |8 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|first_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |9 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|first_name.keyword|12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |10 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |11 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|hire_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |12 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|languages |-6 |BYTE |5 |1 |null |10 |1 |null |null |-6 |0 |null |13 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|last_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |14 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|last_name.keyword |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |15 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|salary |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |16 |YES |null |null |null |null |NO |NO +integTest |null |test_emp_copy|birth_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 
|null |1 |YES |null |null |null |null |NO |NO +integTest |null |test_emp_copy|emp_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |3 |YES |null |null |null |null |NO |NO +integTest |null |test_emp_copy|extra.info.gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |6 |YES |null |null |null |null |NO |NO +integTest |null |test_emp_copy|extra_gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |7 |YES |null |null |null |null |NO |NO +integTest |null |test_emp_copy|extra_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |8 |YES |null |null |null |null |NO |NO +integTest |null |test_emp_copy|first_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |9 |YES |null |null |null |null |NO |NO +integTest |null |test_emp_copy|first_name.keyword|12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |10 |YES |null |null |null |null |NO |NO +integTest |null |test_emp_copy|gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |11 |YES |null |null |null |null |NO |NO +integTest |null |test_emp_copy|hire_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |12 |YES |null |null |null |null |NO |NO +integTest |null |test_emp_copy|languages |-6 |BYTE |5 |1 |null |10 |1 |null |null |-6 |0 |null |13 |YES |null |null |null |null |NO |NO +integTest |null |test_emp_copy|last_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |14 |YES |null |null |null |null |NO |NO +integTest |null |test_emp_copy|last_name.keyword |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |15 |YES |null |null |null |null |NO |NO +integTest |null |test_emp_copy|salary |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |16 |YES |null |null |null |null |NO |NO ; sysColumnsOnAliasWithTableLike @@ -67,19 +67,19 @@ SYS COLUMNS TABLE LIKE 'test\_alias' ESCAPE '\'; TABLE_CAT:s | TABLE_SCHEM:s| TABLE_NAME:s | COLUMN_NAME:s | DATA_TYPE:i | TYPE_NAME:s | COLUMN_SIZE:i| BUFFER_LENGTH:i|DECIMAL_DIGITS:i|NUM_PREC_RADIX:i | NULLABLE:i| REMARKS:s | COLUMN_DEF:s |SQL_DATA_TYPE:i|SQL_DATETIME_SUB:i|CHAR_OCTET_LENGTH:i|ORDINAL_POSITION:i|IS_NULLABLE:s|SCOPE_CATALOG:s|SCOPE_SCHEMA:s|SCOPE_TABLE:s|SOURCE_DATA_TYPE:sh|IS_AUTOINCREMENT:s|IS_GENERATEDCOLUMN:s ---------------+---------------+---------------+--------------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+----------------+-----------------+----------------+---------------+---------------+---------------+---------------+----------------+----------------+------------------ -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |birth_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |1 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |emp_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |3 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |extra.info.gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |6 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |extra_gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |7 |YES |null |null |null |null |NO |NO 
-x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |extra_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |8 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |first_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |9 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |first_name.keyword|12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |10 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |11 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |hire_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |12 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |languages |-6 |BYTE |5 |1 |null |10 |1 |null |null |-6 |0 |null |13 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |last_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |14 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |last_name.keyword |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |15 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |salary |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |16 |YES |null |null |null |null |NO |NO +integTest |null |test_alias |birth_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |1 |YES |null |null |null |null |NO |NO +integTest |null |test_alias |emp_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |3 |YES |null |null |null |null |NO |NO +integTest |null |test_alias |extra.info.gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |6 |YES |null |null |null |null |NO |NO +integTest |null |test_alias |extra_gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |7 |YES |null |null |null |null |NO |NO +integTest |null |test_alias |extra_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |8 |YES |null |null |null |null |NO |NO +integTest |null |test_alias |first_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |9 |YES |null |null |null |null |NO |NO +integTest |null |test_alias |first_name.keyword|12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |10 |YES |null |null |null |null |NO |NO +integTest |null |test_alias |gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |11 |YES |null |null |null |null |NO |NO +integTest |null |test_alias |hire_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |12 |YES |null |null |null |null |NO |NO +integTest |null |test_alias |languages |-6 |BYTE |5 |1 |null |10 |1 |null |null |-6 |0 |null |13 |YES |null |null |null |null |NO |NO +integTest |null |test_alias |last_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |14 |YES |null |null |null |null |NO |NO +integTest |null |test_alias |last_name.keyword |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |15 |YES |null |null |null |null |NO |NO +integTest 
|null |test_alias |salary |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |16 |YES |null |null |null |null |NO |NO ; sysColumnsAllTables @@ -87,35 +87,35 @@ SYS COLUMNS TABLE LIKE '%'; TABLE_CAT:s | TABLE_SCHEM:s| TABLE_NAME:s | COLUMN_NAME:s | DATA_TYPE:i | TYPE_NAME:s | COLUMN_SIZE:i| BUFFER_LENGTH:i|DECIMAL_DIGITS:i|NUM_PREC_RADIX:i | NULLABLE:i| REMARKS:s | COLUMN_DEF:s |SQL_DATA_TYPE:i|SQL_DATETIME_SUB:i|CHAR_OCTET_LENGTH:i|ORDINAL_POSITION:i|IS_NULLABLE:s|SCOPE_CATALOG:s|SCOPE_SCHEMA:s|SCOPE_TABLE:s|SOURCE_DATA_TYPE:sh|IS_AUTOINCREMENT:s|IS_GENERATEDCOLUMN:s ---------------+---------------+---------------+--------------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+----------------+-----------------+----------------+---------------+---------------+---------------+---------------+----------------+----------------+------------------ -x-pack_plugin_sql_qa_single-node_integTestCluster |null |logs |@timestamp |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |1 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |logs |bytes_in |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |2 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |logs |bytes_out |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |3 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |logs |client_ip |12 |IP |0 |39 |null |null |1 |null |null |12 |0 |null |4 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |logs |client_port |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |5 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |logs |dest_ip |12 |IP |0 |39 |null |null |1 |null |null |12 |0 |null |6 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |logs |id |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |7 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |logs |status |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |8 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |birth_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |1 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |emp_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |3 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |first_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |4 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |first_name.keyword|12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |5 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |6 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |hire_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |7 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |languages 
|-6 |BYTE |5 |1 |null |10 |1 |null |null |-6 |0 |null |8 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |last_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |9 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |last_name.keyword |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |10 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |salary |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |11 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |birth_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |1 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |emp_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |3 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |extra.info.gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |6 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |extra_gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |7 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |extra_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |8 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |first_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |9 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |first_name.keyword|12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |10 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |11 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |hire_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |12 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |languages |-6 |BYTE |5 |1 |null |10 |1 |null |null |-6 |0 |null |13 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |last_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |14 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |last_name.keyword |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |15 |YES |null |null |null |null |NO |NO -x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |salary |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |16 |YES |null |null |null |null |NO |NO +integTest |null |logs |@timestamp |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |1 |YES |null |null |null |null |NO |NO +integTest |null |logs |bytes_in |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |2 |YES |null |null |null |null |NO |NO +integTest |null |logs |bytes_out |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |3 |YES |null 
|null |null |null |NO |NO +integTest |null |logs |client_ip |12 |IP |0 |39 |null |null |1 |null |null |12 |0 |null |4 |YES |null |null |null |null |NO |NO +integTest |null |logs |client_port |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |5 |YES |null |null |null |null |NO |NO +integTest |null |logs |dest_ip |12 |IP |0 |39 |null |null |1 |null |null |12 |0 |null |6 |YES |null |null |null |null |NO |NO +integTest |null |logs |id |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |7 |YES |null |null |null |null |NO |NO +integTest |null |logs |status |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |8 |YES |null |null |null |null |NO |NO +integTest |null |test_emp |birth_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |1 |YES |null |null |null |null |NO |NO +integTest |null |test_emp |emp_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |3 |YES |null |null |null |null |NO |NO +integTest |null |test_emp |first_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |4 |YES |null |null |null |null |NO |NO +integTest |null |test_emp |first_name.keyword|12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |5 |YES |null |null |null |null |NO |NO +integTest |null |test_emp |gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |6 |YES |null |null |null |null |NO |NO +integTest |null |test_emp |hire_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |7 |YES |null |null |null |null |NO |NO +integTest |null |test_emp |languages |-6 |BYTE |5 |1 |null |10 |1 |null |null |-6 |0 |null |8 |YES |null |null |null |null |NO |NO +integTest |null |test_emp |last_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |9 |YES |null |null |null |null |NO |NO +integTest |null |test_emp |last_name.keyword |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |10 |YES |null |null |null |null |NO |NO +integTest |null |test_emp |salary |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |11 |YES |null |null |null |null |NO |NO +integTest |null |test_emp_copy |birth_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |1 |YES |null |null |null |null |NO |NO +integTest |null |test_emp_copy |emp_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |3 |YES |null |null |null |null |NO |NO +integTest |null |test_emp_copy |extra.info.gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |6 |YES |null |null |null |null |NO |NO +integTest |null |test_emp_copy |extra_gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |7 |YES |null |null |null |null |NO |NO +integTest |null |test_emp_copy |extra_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |8 |YES |null |null |null |null |NO |NO +integTest |null |test_emp_copy |first_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |9 |YES |null |null |null |null |NO |NO +integTest |null |test_emp_copy |first_name.keyword|12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |10 |YES |null |null |null |null |NO |NO +integTest |null |test_emp_copy |gender |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |11 |YES |null |null |null |null |NO |NO +integTest |null |test_emp_copy |hire_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |12 |YES |null |null |null |null |NO |NO 
+integTest |null |test_emp_copy |languages |-6 |BYTE |5 |1 |null |10 |1 |null |null |-6 |0 |null |13 |YES |null |null |null |null |NO |NO +integTest |null |test_emp_copy |last_name |12 |TEXT |2147483647 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |14 |YES |null |null |null |null |NO |NO +integTest |null |test_emp_copy |last_name.keyword |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |15 |YES |null |null |null |null |NO |NO +integTest |null |test_emp_copy |salary |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |16 |YES |null |null |null |null |NO |NO ; \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java index 62963a99b2a98..f4e3e006e70f9 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java @@ -224,7 +224,7 @@ String format(Cursor cursor, RestRequest request, SqlQueryResponse response) { boolean header = hasHeader(request); - if (header) { + if (header && (cursor == null || cursor == Cursor.EMPTY)) { row(sb, response.columns(), ColumnInfo::name); } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml index 7b5c4e8cb5664..98ef4039eafe4 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml @@ -99,6 +99,49 @@ setup: - match: { preview.2.time.max: "2017-02-18T01:01:00.000Z" } - match: { preview.2.time.min: "2017-02-18T01:01:00.000Z" } + - do: + ingest.put_pipeline: + id: "data_frame_simple_pipeline" + body: > + { + "processors": [ + { + "set" : { + "field" : "my_field", + "value": 42 + } + } + ] + } + - match: { acknowledged: true } + - do: + data_frame.preview_data_frame_transform: + body: > + { + "source": { "index": "airline-data" }, + "dest": { "pipeline": "data_frame_simple_pipeline" }, + "pivot": { + "group_by": { + "airline": {"terms": {"field": "airline"}}, + "by-hour": {"date_histogram": {"fixed_interval": "1h", "field": "time", "format": "yyyy-MM-dd HH"}}}, + "aggs": { + "avg_response": {"avg": {"field": "responsetime"}} + } + } + } + - match: { preview.0.airline: foo } + - match: { preview.0.by-hour: "2017-02-18 00" } + - match: { preview.0.avg_response: 1.0 } + - match: { preview.0.my_field: 42 } + - match: { preview.1.airline: bar } + - match: { preview.1.by-hour: "2017-02-18 01" } + - match: { preview.1.avg_response: 42.0 } + - match: { preview.1.my_field: 42 } + - match: { preview.2.airline: foo } + - match: { preview.2.by-hour: "2017-02-18 01" } + - match: { preview.2.avg_response: 42.0 } + - match: { preview.2.my_field: 42 } + --- "Test preview transform with invalid config": - do: @@ -127,7 +170,6 @@ setup: "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} } } - --- "Test preview returns bad request with invalid agg": - do: @@ -161,4 +203,21 @@ setup: } } } - +--- +"Test preview with missing pipeline": + - do: + catch: bad_request + data_frame.preview_data_frame_transform: + body: > + { + "source": { "index": "airline-data" }, + "dest": { "pipeline": "missing-pipeline" }, + "pivot": { + "group_by": { + "time": {"date_histogram": {"fixed_interval": "1h", "field": 
"time", "format": "yyyy-MM-DD HH"}}}, + "aggs": { + "avg_response": {"avg": {"field": "responsetime"}}, + "time.min": {"min": {"field": "time"}} + } + } + } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml index ffba67f879145..98bd095917917 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml @@ -74,7 +74,7 @@ setup: body: > { "source": { "index": "airline-data" }, - "dest": { "index": "airline-data-by-airline-again" }, + "dest": { "index": "airline-data-by-airline-again", "pipeline": "airline-pipeline" }, "pivot": { "group_by": { "airline": {"terms": {"field": "airline"}}}, "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} @@ -127,7 +127,7 @@ setup: transform_id: "airline-transform*" from: 0 size: 1 - - match: { count: 1 } + - match: { count: 2 } - match: { transforms.0.id: "airline-transform" } - do: @@ -135,7 +135,7 @@ setup: transform_id: "airline-transform*" from: 1 size: 1 - - match: { count: 1 } + - match: { count: 2 } - match: { transforms.0.id: "airline-transform-dos" } --- diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml index 6dc623683b43c..486a198276ab2 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml @@ -123,7 +123,7 @@ teardown: transform_id: "_all" from: 0 size: 1 - - match: { count: 1 } + - match: { count: 3 } - match: { transforms.0.id: "airline-transform-stats" } - do: @@ -131,7 +131,7 @@ teardown: transform_id: "_all" from: 1 size: 2 - - match: { count: 2 } + - match: { count: 3 } - match: { transforms.0.id: "airline-transform-stats-dos" } - match: { transforms.1.id: "airline-transform-stats-the-third" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml index 884bc80437738..d886ec1fb7ff6 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml @@ -86,7 +86,13 @@ setup: from: 1 size: 1 - - match: { count: 1 } + - match: { count: 2 } + + - match: + filters.0: + filter_id: "filter-foo2" + description: "This filter has a description" + items: ["123", "lmnop"] --- "Test get filters API with expression ID": diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/TimeThrottleIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/TimeThrottleIntegrationTests.java index b138c35c07bc5..671d3ec44ed5e 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/TimeThrottleIntegrationTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/TimeThrottleIntegrationTests.java @@ -7,14 +7,13 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ObjectPath; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse; import 
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/TimeThrottleIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/TimeThrottleIntegrationTests.java
index b138c35c07bc5..671d3ec44ed5e 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/TimeThrottleIntegrationTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/TimeThrottleIntegrationTests.java
@@ -7,14 +7,13 @@
 
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ObjectPath;
 import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
 import org.elasticsearch.search.sort.SortBuilders;
 import org.elasticsearch.search.sort.SortOrder;
-import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField;
-import org.elasticsearch.common.xcontent.ObjectPath;
 import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchRequestBuilder;
 import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase;
 
@@ -30,10 +29,6 @@
 import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval;
 import static org.hamcrest.Matchers.is;
 
-@TestLogging("org.elasticsearch.xpack.watcher:DEBUG," +
-        "org.elasticsearch.xpack.watcher.WatcherLifeCycleService:DEBUG," +
-        "org.elasticsearch.xpack.watcher.trigger.ScheduleTriggerMock:TRACE," +
-        "org.elasticsearch.xpack.watcher.WatcherIndexingListener:TRACE")
 public class TimeThrottleIntegrationTests extends AbstractWatcherIntegrationTestCase {
 
     public void testTimeThrottle(){
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryActionConditionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryActionConditionTests.java
index 1029fed379229..01fe96c480da0 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryActionConditionTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryActionConditionTests.java
@@ -13,7 +13,6 @@
 import org.elasticsearch.script.ScriptType;
 import org.elasticsearch.search.SearchHit;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
-import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.xpack.core.watcher.client.WatchSourceBuilder;
 import org.elasticsearch.xpack.core.watcher.condition.Condition;
 import org.elasticsearch.xpack.core.watcher.condition.ExecutableCondition;
@@ -47,7 +46,6 @@
 /**
  * This test makes sure per-action conditions are honored.
 */
-@TestLogging("org.elasticsearch.xpack.watcher:DEBUG,org.elasticsearch.xpack.watcher.WatcherIndexingListener:TRACE")
 public class HistoryActionConditionTests extends AbstractWatcherIntegrationTestCase {
 
     private final Input input = simpleInput("key", 15).build();
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateEmailMappingsTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateEmailMappingsTests.java
index a4767dd6e53cb..9bbddf9e0aee6 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateEmailMappingsTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateEmailMappingsTests.java
@@ -11,7 +11,6 @@
 import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse;
 import org.elasticsearch.search.aggregations.Aggregations;
 import org.elasticsearch.search.aggregations.bucket.terms.Terms;
-import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.xpack.core.watcher.execution.ExecutionState;
 import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField;
 import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchRequestBuilder;
@@ -35,8 +34,6 @@
  * This test makes sure that the email address fields in the watch_record action result are
  * not analyzed so they can be used in aggregations
 */
-@TestLogging("org.elasticsearch.xpack.watcher:DEBUG," +
-        "org.elasticsearch.xpack.watcher.WatcherIndexingListener:TRACE")
 public class HistoryTemplateEmailMappingsTests extends AbstractWatcherIntegrationTestCase {
 
     private EmailServer server;
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BasicWatcherTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BasicWatcherTests.java
index 17f2864abceb3..08854743a41ef 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BasicWatcherTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BasicWatcherTests.java
@@ -16,7 +16,6 @@
 import org.elasticsearch.script.Script;
 import org.elasticsearch.script.ScriptType;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
-import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.xpack.core.watcher.client.WatchSourceBuilder;
 import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource;
 import org.elasticsearch.xpack.core.watcher.transport.actions.delete.DeleteWatchRequestBuilder;
@@ -62,8 +61,6 @@
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.notNullValue;
 
-@TestLogging("org.elasticsearch.xpack.watcher:DEBUG," +
-        "org.elasticsearch.xpack.watcher.WatcherIndexingListener:TRACE")
 public class BasicWatcherTests extends AbstractWatcherIntegrationTestCase {
 
     public void testIndexWatch() throws Exception {
@@ -167,7 +164,6 @@ public void testMalformedWatch() throws Exception {
         }
     }
 
-    @TestLogging("org.elasticsearch.xpack.watcher:DEBUG")
     public void testModifyWatches() throws Exception {
         createIndex("idx");
         WatcherSearchTemplateRequest searchRequest = templateRequest(searchSource().query(matchAllQuery()), "idx");
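The @TestLogging annotations removed throughout these watcher suites pinned extra-verbose loggers at the class or method level. Where that verbosity is still wanted under testclusters, the closest equivalent is a node-level logger setting, the same knob the jira build script further down uses; a sketch, not part of this PR's changes:

    // Hypothetical alternative to a removed @TestLogging annotation:
    // raise the watcher logger on the test cluster itself.
    testClusters.integTest {
      setting 'logger.org.elasticsearch.xpack.watcher', 'DEBUG'
    }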
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java
index 744b875ffd569..d31e083f63f51 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java
@@ -5,7 +5,6 @@
  */
 package org.elasticsearch.xpack.watcher.test.integration;
 
-
 import org.elasticsearch.action.DocWriteResponse;
 import org.elasticsearch.action.delete.DeleteResponse;
 import org.elasticsearch.action.get.GetRequest;
@@ -15,7 +14,6 @@
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse;
-import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.xpack.core.watcher.actions.ActionStatus;
 import org.elasticsearch.xpack.core.watcher.execution.ExecutionState;
 import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField;
@@ -49,7 +47,6 @@
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.core.IsEqual.equalTo;
 
-@TestLogging("org.elasticsearch.xpack.watcher:DEBUG")
 public class WatchAckTests extends AbstractWatcherIntegrationTestCase {
 
     private String id = randomAlphaOfLength(10);
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transform/TransformIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transform/TransformIntegrationTests.java
index 2ae469408395a..4c3f4ff26524e 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transform/TransformIntegrationTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transform/TransformIntegrationTests.java
@@ -15,7 +15,6 @@
 import org.elasticsearch.script.MockScriptPlugin;
 import org.elasticsearch.script.Script;
 import org.elasticsearch.script.ScriptType;
-import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchRequestBuilder;
 import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchRequestBuilder;
 import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateRequest;
@@ -156,7 +155,6 @@ public void testScriptTransform() throws Exception {
         assertThat(response.getHits().getAt(0).getSourceAsMap().get("key3").toString(), equalTo("20"));
     }
 
-    @TestLogging("org.elasticsearch.xpack.watcher:DEBUG")
     public void testSearchTransform() throws Exception {
         createIndex("my-condition-index", "my-payload-index");
         ensureGreen("my-condition-index", "my-payload-index");
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/get/GetWatchTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/get/GetWatchTests.java
index 90cc84bd6924a..72a2ffc5d62a0 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/get/GetWatchTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/get/GetWatchTests.java
@@ -8,7 +8,6 @@
 import org.elasticsearch.action.admin.indices.get.GetIndexResponse;
 import org.elasticsearch.index.IndexNotFoundException;
 import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse;
-import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource;
 import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchRequestBuilder;
 import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchResponse;
@@ -31,8 +30,6 @@
 import static org.hamcrest.Matchers.notNullValue;
 import static org.hamcrest.Matchers.nullValue;
 
-// added due to https://github.com/elastic/x-pack-elasticsearch/issues/3854
-@TestLogging("org.elasticsearch.action.search:DEBUG")
 public class GetWatchTests extends AbstractWatcherIntegrationTestCase {
 
     public void testGet() throws Exception {
diff --git a/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/OpenLdapUserSearchSessionFactoryTests.java b/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/OpenLdapUserSearchSessionFactoryTests.java
index b899d28273d5a..2f732110eecbe 100644
--- a/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/OpenLdapUserSearchSessionFactoryTests.java
+++ b/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/OpenLdapUserSearchSessionFactoryTests.java
@@ -15,7 +15,6 @@
 import org.elasticsearch.env.TestEnvironment;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.OpenLdapTests;
-import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.threadpool.TestThreadPool;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.xpack.core.security.authc.RealmConfig;
@@ -43,7 +42,6 @@
 import static org.hamcrest.Matchers.hasItem;
 import static org.hamcrest.Matchers.is;
 
-@TestLogging("org.elasticsearch.xpack.core.ssl.SSLService:TRACE")
 public class OpenLdapUserSearchSessionFactoryTests extends ESTestCase {
 
     private Settings globalSettings;
diff --git a/x-pack/qa/reindex-tests-with-security/build.gradle b/x-pack/qa/reindex-tests-with-security/build.gradle
index 48384aed17a84..1cedd1f5bf6c6 100644
--- a/x-pack/qa/reindex-tests-with-security/build.gradle
+++ b/x-pack/qa/reindex-tests-with-security/build.gradle
@@ -1,5 +1,4 @@
-import org.elasticsearch.gradle.http.WaitForHttpResource
-
+apply plugin: 'elasticsearch.testclusters'
 apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
 
@@ -22,10 +21,11 @@ forbiddenPatterns {
 
 File caFile = project.file('src/test/resources/ssl/ca.p12')
 
-integTestCluster {
+testClusters.integTest {
+  distribution = "DEFAULT"
   // Whitelist reindexing from the local node so we can test it.
-  extraConfigFile 'http.key', project.projectDir.toPath().resolve('src/test/resources/ssl/http.key')
-  extraConfigFile 'http.crt', project.projectDir.toPath().resolve('src/test/resources/ssl/http.crt')
+  extraConfigFile 'http.key', file('src/test/resources/ssl/http.key')
+  extraConfigFile 'http.crt', file('src/test/resources/ssl/http.crt')
   extraConfigFile 'ca.p12', caFile
   setting 'reindex.remote.whitelist', '127.0.0.1:*'
   setting 'xpack.ilm.enabled', 'false'
@@ -46,26 +46,13 @@ integTestCluster {
     }
   }
 
-  extraConfigFile 'roles.yml', 'roles.yml'
-  [
-    test_admin: 'superuser',
-    powerful_user: 'superuser',
-    minimal_user: 'minimal',
-    minimal_with_task_user: 'minimal_with_task',
-    readonly_user: 'readonly',
-    dest_only_user: 'dest_only',
-    can_not_see_hidden_docs_user: 'can_not_see_hidden_docs',
-    can_not_see_hidden_fields_user: 'can_not_see_hidden_fields',
-  ].each { String user, String role ->
-    setupCommand 'setupUser#' + user,
-      'bin/elasticsearch-users', 'useradd', user, '-p', 'x-pack-test-password', '-r', role
-  }
-  waitCondition = { node, ant ->
-    WaitForHttpResource http = new WaitForHttpResource("https", node.httpUri(), numNodes)
-    http.setTrustStoreFile(caFile)
-    http.setTrustStorePassword("password")
-    http.setUsername("test_admin")
-    http.setPassword("x-pack-test-password")
-    return http.wait(5000)
-  }
+  extraConfigFile 'roles.yml', file('roles.yml')
+  user username: "test_admin", password: 'x-pack-test-password', role: "superuser"
+  user username: "powerful_user", password: 'x-pack-test-password', role: "superuser"
+  user username: "minimal_user", password: 'x-pack-test-password', role: "minimal"
+  user username: "minimal_with_task_user", password: 'x-pack-test-password', role: "minimal_with_task"
+  user username: "readonly_user", password: 'x-pack-test-password', role: "readonly"
+  user username: "dest_only_user", password: 'x-pack-test-password', role: "dest_only"
+  user username: "can_not_see_hidden_docs_user", password: 'x-pack-test-password', role: "can_not_see_hidden_docs"
+  user username: "can_not_see_hidden_fields_user", password: 'x-pack-test-password', role: "can_not_see_hidden_fields"
 }
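The eight `user` declarations above replace the removed setupCommand loop one-for-one. Should the user list keep growing, the old map-driven loop still reads well against the new DSL; a sketch with a trimmed user map, assuming the `user` method accepts the named arguments shown in the diff:

    // Loop-driven variant of the user declarations above (trimmed map).
    def usersToRoles = [
      test_admin   : 'superuser',
      minimal_user : 'minimal',
      readonly_user: 'readonly',
    ]
    testClusters.integTest {
      usersToRoles.each { String name, String role ->
        user username: name, password: 'x-pack-test-password', role: role
      }
    }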
diff --git a/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/MonitoringWithWatcherRestIT.java b/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/MonitoringWithWatcherRestIT.java
index 40ed71df842f3..780428d815dc8 100644
--- a/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/MonitoringWithWatcherRestIT.java
+++ b/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/MonitoringWithWatcherRestIT.java
@@ -9,7 +9,6 @@
 import org.elasticsearch.client.Request;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.common.Strings;
-import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.test.rest.ESRestTestCase;
 import org.elasticsearch.test.rest.yaml.ObjectPath;
 import org.junit.After;
@@ -19,7 +18,6 @@
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.is;
 
-@TestLogging("org.elasticsearch.client:TRACE,tracer:TRACE")
 @AwaitsFix(bugUrl = "flaky tests")
 public class MonitoringWithWatcherRestIT extends ESRestTestCase {
 
diff --git a/x-pack/qa/smoke-test-plugins-ssl/build.gradle b/x-pack/qa/smoke-test-plugins-ssl/build.gradle
index 2f850636037d5..78c0356c5c2a4 100644
--- a/x-pack/qa/smoke-test-plugins-ssl/build.gradle
+++ b/x-pack/qa/smoke-test-plugins-ssl/build.gradle
@@ -2,6 +2,7 @@ import org.elasticsearch.gradle.MavenFilteringHack
 import org.elasticsearch.gradle.test.NodeInfo
 import org.elasticsearch.gradle.http.WaitForHttpResource
 
+apply plugin: 'elasticsearch.testclusters'
 apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
 
@@ -38,16 +39,11 @@ task copyKeyCerts(type: Copy) {
 sourceSets.test.resources.srcDir(keystoreDir)
 processTestResources.dependsOn(copyKeyCerts)
 
-integTestCluster.dependsOn(copyKeyCerts)
+integTest.runner.dependsOn(copyKeyCerts)
 
-ext.pluginsCount = 0
-project(':plugins').getChildProjects().each { pluginName, pluginProject ->
-  // need to get a non-decorated project object, so must re-lookup the project by path
-  integTestCluster.plugin(pluginProject.path)
-  pluginsCount += 1
-}
-
-integTestCluster {
+def pluginsCount = 0
+testClusters.integTest {
+  distribution = "DEFAULT"
   setting 'xpack.monitoring.collection.interval', '1s'
   setting 'xpack.monitoring.exporters._http.type', 'http'
   setting 'xpack.monitoring.exporters._http.enabled', 'false'
@@ -62,7 +58,7 @@ integTestCluster {
   setting 'xpack.security.http.ssl.key', 'testnode.pem'
   setting 'xpack.security.http.ssl.certificate', 'testnode.crt'
   setting 'xpack.security.http.ssl.certificate_authorities', 'testnode.crt'
-  keystoreSetting 'xpack.security.http.ssl.secure_key_passphrase', 'testnode'
+  keystore 'xpack.security.http.ssl.secure_key_passphrase', 'testnode'
 
   setting 'xpack.ilm.enabled', 'false'
   setting 'xpack.ml.enabled', 'false'
@@ -74,18 +70,13 @@ integTestCluster {
   extraConfigFile clientKey.name, clientKey
   extraConfigFile clientCert.name, clientCert
 
-  setupCommand 'setupTestUser',
-    'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser'
-  setupCommand 'setupMonitoringUser',
-    'bin/elasticsearch-users', 'useradd', 'monitoring_agent', '-p', 'x-pack-test-password', '-r', 'remote_monitoring_agent'
+  user username: "test_user", password: "x-pack-test-password"
+  user username: "monitoring_agent", password: "x-pack-test-password", role: "remote_monitoring_agent"
 
-  waitCondition = { NodeInfo node, AntBuilder ant ->
-    WaitForHttpResource http = new WaitForHttpResource("https", node.httpUri(), numNodes)
-    http.setTrustStoreFile(clientKeyStore)
-    http.setTrustStorePassword("testclient")
-    http.setUsername("test_user")
-    http.setPassword("x-pack-test-password")
-    return http.wait(5000)
+  project(':plugins').getChildProjects().each { pluginName, pluginProject ->
+    plugin file(pluginProject.tasks.bundlePlugin.archiveFile)
+    tasks.integTest.dependsOn pluginProject.tasks.bundlePlugin
+    pluginsCount += 1
   }
 }
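Worth noting in the hunk above: plugins are no longer installed by project path via `integTestCluster.plugin(...)`; the cluster now consumes the zip each plugin's `bundlePlugin` task produces, so the test task must depend on those tasks for the archives to exist before the cluster starts. A single-plugin sketch of the same wiring, where the ':plugins:analysis-icu' path is only an example:

    // Minimal form of the plugin wiring above, for one plugin project.
    def icu = project(':plugins:analysis-icu')
    testClusters.integTest {
      plugin file(icu.tasks.bundlePlugin.archiveFile)
    }
    tasks.integTest.dependsOn icu.tasks.bundlePlugin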
diff --git a/x-pack/qa/smoke-test-plugins/build.gradle b/x-pack/qa/smoke-test-plugins/build.gradle
index 5aa3adbdf09f5..86fb4ee0f07d6 100644
--- a/x-pack/qa/smoke-test-plugins/build.gradle
+++ b/x-pack/qa/smoke-test-plugins/build.gradle
@@ -1,5 +1,6 @@
 import org.elasticsearch.gradle.MavenFilteringHack
 
+apply plugin: 'elasticsearch.testclusters'
 apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
 
@@ -7,27 +8,16 @@ dependencies {
   testCompile project(':x-pack:qa')
 }
 
-ext.pluginsCount = 0
-project(':plugins').getChildProjects().each { pluginName, pluginProject ->
-  // need to get a non-decorated project object, so must re-lookup the project by path
-  integTestCluster.plugin(pluginProject.path)
-  pluginsCount += 1
-}
-
-integTestCluster {
+int pluginsCount = 0
+testClusters.integTest {
+  distribution = "DEFAULT"
   setting 'xpack.security.enabled', 'true'
   setting 'xpack.license.self_generated.type', 'trial'
-  setupCommand 'setupDummyUser',
-    'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser'
-  waitCondition = { node, ant ->
-    File tmpFile = new File(node.cwd, 'wait.success')
-    ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow",
-      dest: tmpFile.toString(),
-      username: 'test_user',
-      password: 'x-pack-test-password',
-      ignoreerrors: true,
-      retries: 10)
-    return tmpFile.exists()
+  user username: "test_user", password: "x-pack-test-password"
+  project(':plugins').getChildProjects().each { pluginName, pluginProject ->
+    plugin file(pluginProject.tasks.bundlePlugin.archiveFile)
+    tasks.integTest.dependsOn pluginProject.tasks.bundlePlugin
+    pluginsCount += 1
   }
 }
diff --git a/x-pack/qa/third-party/jira/build.gradle b/x-pack/qa/third-party/jira/build.gradle
index c01f6f129b9b1..c503981a1a8c2 100644
--- a/x-pack/qa/third-party/jira/build.gradle
+++ b/x-pack/qa/third-party/jira/build.gradle
@@ -3,6 +3,7 @@ import groovy.json.JsonSlurper
 import javax.net.ssl.HttpsURLConnection
 import java.nio.charset.StandardCharsets
 
+apply plugin: 'elasticsearch.testclusters'
 apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
 
@@ -11,23 +12,18 @@ dependencies {
   testCompile project(path: xpackModule('watcher'), configuration: 'runtime')
 }
 
-ext {
-  jiraUrl = System.getenv('jira_url')
-  jiraUser = System.getenv('jira_user')
-  jiraPassword = System.getenv('jira_password')
-  jiraProject = System.getenv('jira_project')
-}
-integTestCluster {
+String jiraUrl = System.getenv('jira_url')
+String jiraUser = System.getenv('jira_user')
+String jiraPassword = System.getenv('jira_password')
+String jiraProject = System.getenv('jira_project')
+
+testClusters.integTest {
   setting 'xpack.security.enabled', 'false'
   setting 'xpack.monitoring.enabled', 'false'
   setting 'xpack.ml.enabled', 'false'
   setting 'xpack.license.self_generated.type', 'trial'
   setting 'logger.org.elasticsearch.xpack.watcher', 'DEBUG'
-  keystoreSetting 'xpack.notification.jira.account.test.secure_url', jiraUrl
-  keystoreSetting 'xpack.notification.jira.account.test.secure_user', jiraUser
-  keystoreSetting 'xpack.notification.jira.account.test.secure_password', jiraPassword
-  setting 'xpack.notification.jira.account.test.issue_defaults.project.key', jiraProject
   setting 'xpack.notification.jira.account.test.issue_defaults.issuetype.name', 'Bug'
   setting 'xpack.notification.jira.account.test.issue_defaults.labels.0', 'integration-tests'
 }
@@ -49,7 +45,13 @@ if (!jiraUrl && !jiraUser && !jiraPassword && !jiraProject) {
   integTest.enabled = false
   testingConventions.enabled = false
 } else {
-  integTestRunner.finalizedBy cleanJira
+  testClusters.integTest {
+    setting 'xpack.notification.jira.account.test.issue_defaults.project.key', jiraProject
+    keystore 'xpack.notification.jira.account.test.secure_url', jiraUrl
+    keystore 'xpack.notification.jira.account.test.secure_user', jiraUser
+    keystore 'xpack.notification.jira.account.test.secure_password', jiraPassword
+  }
+  integTest.runner.finalizedBy cleanJira
 }
 
 /** List all issues associated to a given Jira project **/
diff --git a/x-pack/qa/third-party/pagerduty/build.gradle b/x-pack/qa/third-party/pagerduty/build.gradle
index 69c9848447044..ebee73f9eaaed 100644
--- a/x-pack/qa/third-party/pagerduty/build.gradle
+++ b/x-pack/qa/third-party/pagerduty/build.gradle
@@ -1,3 +1,4 @@
+apply plugin: 'elasticsearch.testclusters'
 apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
 
@@ -8,16 +9,16 @@ dependencies {
 
 String pagerDutyServiceKey = System.getenv('pagerduty_service_api_key')
 
-integTestCluster {
-  setting 'xpack.security.enabled', 'false'
-  setting 'xpack.monitoring.enabled', 'false'
-  setting 'xpack.ml.enabled', 'false'
-  setting 'xpack.license.self_generated.type', 'trial'
-  setting 'logger.org.elasticsearch.xpack.watcher', 'DEBUG'
-  keystoreSetting 'xpack.notification.pagerduty.account.test_account.secure_service_api_key', pagerDutyServiceKey
-}
-
 if (!pagerDutyServiceKey) {
   integTest.enabled = false
   testingConventions.enabled = false
+} else {
+  testClusters.integTest {
+    setting 'xpack.security.enabled', 'false'
+    setting 'xpack.monitoring.enabled', 'false'
+    setting 'xpack.ml.enabled', 'false'
+    setting 'xpack.license.self_generated.type', 'trial'
+    setting 'logger.org.elasticsearch.xpack.watcher', 'DEBUG'
+    keystore 'xpack.notification.pagerduty.account.test_account.secure_service_api_key', pagerDutyServiceKey
+  }
 }
diff --git a/x-pack/qa/third-party/slack/build.gradle b/x-pack/qa/third-party/slack/build.gradle
index 956631714c040..de0550be81943 100644
--- a/x-pack/qa/third-party/slack/build.gradle
+++ b/x-pack/qa/third-party/slack/build.gradle
@@ -1,3 +1,4 @@
+apply plugin: 'elasticsearch.testclusters'
 apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
 
@@ -8,16 +9,16 @@ dependencies {
 
 String slackUrl = System.getenv('slack_url')
 
-integTestCluster {
-  setting 'xpack.security.enabled', 'false'
-  setting 'xpack.monitoring.enabled', 'false'
-  setting 'xpack.ml.enabled', 'false'
-  setting 'xpack.license.self_generated.type', 'trial'
-  setting 'logger.org.elasticsearch.xpack.watcher', 'DEBUG'
-  keystoreSetting 'xpack.notification.slack.account.test_account.secure_url', slackUrl
-}
-
 if (!slackUrl) {
   integTest.enabled = false
   testingConventions.enabled = false
+} else {
+  testClusters.integTest {
+    setting 'xpack.security.enabled', 'false'
+    setting 'xpack.monitoring.enabled', 'false'
+    setting 'xpack.ml.enabled', 'false'
+    setting 'xpack.license.self_generated.type', 'trial'
+    setting 'logger.org.elasticsearch.xpack.watcher', 'DEBUG'
+    keystore 'xpack.notification.slack.account.test_account.secure_url', slackUrl
+  }
 }
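The jira, pagerduty, and slack builds now share one shape: when the external credential is missing, the test and its conventions check are disabled, and only in the else branch does the cluster receive its keystore entries, so an unset environment variable can never reach a secure setting. A condensed sketch of that guard, with an illustrative env var and setting name:

    // Common guard pattern from the three third-party notification builds.
    String secret = System.getenv('example_service_key')  // illustrative name

    if (!secret) {
      integTest.enabled = false
      testingConventions.enabled = false
    } else {
      testClusters.integTest {
        setting 'xpack.license.self_generated.type', 'trial'
        keystore 'xpack.notification.example.secure_key', secret  // illustrative key
      }
    }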