diff --git a/build.gradle b/build.gradle index 6453db4c0fb88..cfc8401a934e0 100644 --- a/build.gradle +++ b/build.gradle @@ -204,25 +204,15 @@ task branchConsistency { } subprojects { - project.afterEvaluate { - // ignore missing javadocs - tasks.withType(Javadoc) { Javadoc javadoc -> - // the -quiet here is because of a bug in gradle, in that adding a string option - // by itself is not added to the options. By adding quiet, both this option and - // the "value" -quiet is added, separated by a space. This is ok since the javadoc - // command already adds -quiet, so we are just duplicating it - // see https://discuss.gradle.org/t/add-custom-javadoc-option-that-does-not-take-an-argument/5959 - javadoc.options.encoding='UTF8' - javadoc.options.addStringOption('Xdoclint:all,-missing', '-quiet') - /* - TODO: building javadocs with java 9 b118 is currently broken with weird errors, so - for now this is commented out...try again with the next ea build... - javadoc.executable = new File(project.javaHome, 'bin/javadoc') - if (project.javaVersion == JavaVersion.VERSION_1_9) { - // TODO: remove this hack! gradle should be passing this... - javadoc.options.addStringOption('source', '8') - }*/ - } + // ignore missing javadocs + tasks.withType(Javadoc) { Javadoc javadoc -> + // the -quiet here is because of a bug in gradle, in that adding a string option + // by itself is not added to the options. By adding quiet, both this option and + // the "value" -quiet is added, separated by a space. This is ok since the javadoc + // command already adds -quiet, so we are just duplicating it + // see https://discuss.gradle.org/t/add-custom-javadoc-option-that-does-not-take-an-argument/5959 + javadoc.options.encoding='UTF8' + javadoc.options.addStringOption('Xdoclint:all,-missing', '-quiet') } /* Sets up the dependencies that we build as part of this project but @@ -280,6 +270,27 @@ subprojects { } } } + + // Handle javadoc dependencies across projects. 
Order matters: the linksOffline for + // org.elasticsearch:elasticsearch must be the last one or all the links for the + // other packages (e.g org.elasticsearch.client) will point to core rather than + // their own artifacts. + if (project.plugins.hasPlugin(BuildPlugin)) { + String artifactsHost = VersionProperties.elasticsearch.endsWith("-SNAPSHOT") ? "https://snapshots.elastic.co" : "https://artifacts.elastic.co" + Closure sortClosure = { a, b -> b.group <=> a.group } + Closure depJavadocClosure = { dep -> + if (dep.group != null && dep.group.startsWith('org.elasticsearch')) { + String substitution = project.ext.projectSubstitutions.get("${dep.group}:${dep.name}:${dep.version}") + if (substitution != null) { + project.javadoc.dependsOn substitution + ':javadoc' + String artifactPath = dep.group.replaceAll('\\.', '/') + '/' + dep.name.replaceAll('\\.', '/') + '/' + dep.version + project.javadoc.options.linksOffline artifactsHost + "/javadoc/" + artifactPath, "${project.project(substitution).buildDir}/docs/javadoc/" + } + } + } + project.configurations.compile.dependencies.findAll().toSorted(sortClosure).each(depJavadocClosure) + project.configurations.provided.dependencies.findAll().toSorted(sortClosure).each(depJavadocClosure) + } } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index c86b2b6cb79ad..e836bd2fa2675 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -455,28 +455,8 @@ class BuildPlugin implements Plugin { } static void configureJavadoc(Project project) { - String artifactsHost = VersionProperties.elasticsearch.endsWith("-SNAPSHOT") ? 
"https://snapshots.elastic.co" : "https://artifacts.elastic.co" - project.afterEvaluate { - project.tasks.withType(Javadoc) { - executable = new File(project.javaHome, 'bin/javadoc') - } - /* - * Order matters, the linksOffline for org.elasticsearch:elasticsearch must be the last one - * or all the links for the other packages (e.g org.elasticsearch.client) will point to core rather than their own artifacts - */ - Closure sortClosure = { a, b -> b.group <=> a.group } - Closure depJavadocClosure = { dep -> - if (dep.group != null && dep.group.startsWith('org.elasticsearch')) { - String substitution = project.ext.projectSubstitutions.get("${dep.group}:${dep.name}:${dep.version}") - if (substitution != null) { - project.javadoc.dependsOn substitution + ':javadoc' - String artifactPath = dep.group.replaceAll('\\.', '/') + '/' + dep.name.replaceAll('\\.', '/') + '/' + dep.version - project.javadoc.options.linksOffline artifactsHost + "/javadoc/" + artifactPath, "${project.project(substitution).buildDir}/docs/javadoc/" - } - } - } - project.configurations.compile.dependencies.findAll().toSorted(sortClosure).each(depJavadocClosure) - project.configurations.provided.dependencies.findAll().toSorted(sortClosure).each(depJavadocClosure) + project.tasks.withType(Javadoc) { + executable = new File(project.javaHome, 'bin/javadoc') } configureJavadocJar(project) } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ForbiddenPatternsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ForbiddenPatternsTask.groovy index ed62e88c567fa..e574d67f2ace1 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ForbiddenPatternsTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ForbiddenPatternsTask.groovy @@ -63,11 +63,6 @@ public class ForbiddenPatternsTask extends DefaultTask { patterns.put('nocommit should be all lowercase or all uppercase', /((?i)nocommit)(? 
{ } String version = pipe.toString().trim() if (runResult.exitValue == 0) { - if (version ==~ /Vagrant 1\.(8\.[6-9]|9\.[0-9])+/) { + if (version ==~ /Vagrant 1\.(8\.[6-9]|9\.[0-9])+/ || version ==~ /Vagrant 2\.[0-9]+\.[0-9]+/) { return [ 'supported' : true ] } else { return [ 'supported' : false, diff --git a/client/rest-high-level/build.gradle b/client/rest-high-level/build.gradle index 600bf602080f3..ba97605dba82e 100644 --- a/client/rest-high-level/build.gradle +++ b/client/rest-high-level/build.gradle @@ -59,4 +59,5 @@ forbiddenApisMain { // core does not depend on the httpclient for compile so we add the signatures here. We don't add them for test as they are already // specified signaturesURLs += [PrecommitTasks.getResource('/forbidden/http-signatures.txt')] + signaturesURLs += [file('src/main/resources/forbidden/rest-high-level-signatures.txt').toURI().toURL()] } \ No newline at end of file diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java index 9e881cf7b9add..77e501551cd53 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java @@ -42,6 +42,7 @@ import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.unit.TimeValue; @@ -57,6 +58,7 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; +import java.nio.charset.Charset; import java.util.Collections; import java.util.HashMap; import java.util.Locale; @@ -139,8 +141,8 @@ static Request bulk(BulkRequest bulkRequest) throws IOException { bulkContentType = XContentType.JSON; } - byte separator = 
bulkContentType.xContent().streamSeparator(); - ContentType requestContentType = ContentType.create(bulkContentType.mediaType()); + final byte separator = bulkContentType.xContent().streamSeparator(); + final ContentType requestContentType = createContentType(bulkContentType); ByteArrayOutputStream content = new ByteArrayOutputStream(); for (DocWriteRequest request : bulkRequest.requests()) { @@ -268,7 +270,7 @@ static Request index(IndexRequest indexRequest) { parameters.withWaitForActiveShards(indexRequest.waitForActiveShards()); BytesRef source = indexRequest.source().toBytesRef(); - ContentType contentType = ContentType.create(indexRequest.getContentType().mediaType()); + ContentType contentType = createContentType(indexRequest.getContentType()); HttpEntity entity = new ByteArrayEntity(source.bytes, source.offset, source.length, contentType); return new Request(method, endpoint, parameters.getParams(), entity); @@ -352,7 +354,7 @@ static Request clearScroll(ClearScrollRequest clearScrollRequest) throws IOExcep private static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) throws IOException { BytesRef source = XContentHelper.toXContent(toXContent, xContentType, false).toBytesRef(); - return new ByteArrayEntity(source.bytes, source.offset, source.length, ContentType.create(xContentType.mediaType())); + return new ByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType)); } static String endpoint(String[] indices, String[] types, String endpoint) { @@ -372,6 +374,17 @@ static String endpoint(String... parts) { return joiner.toString(); } + /** + * Returns a {@link ContentType} from a given {@link XContentType}. 
+ * + * @param xContentType the {@link XContentType} + * @return the {@link ContentType} + */ + @SuppressForbidden(reason = "Only allowed place to convert a XContentType to a ContentType") + static ContentType createContentType(final XContentType xContentType) { + return ContentType.create(xContentType.mediaTypeWithoutParameters(), (Charset) null); + } + /** * Utility class to build request's parameters map and centralize all parameter names. */ diff --git a/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt b/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt new file mode 100644 index 0000000000000..fb2330f3f083c --- /dev/null +++ b/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt @@ -0,0 +1,21 @@ +# Licensed to Elasticsearch under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on +# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied. See the License for the specific +# language governing permissions and limitations under the License. 
+ +@defaultMessage Use Request#createContentType(XContentType) to be sure to pass the right MIME type +org.apache.http.entity.ContentType#create(java.lang.String) +org.apache.http.entity.ContentType#create(java.lang.String,java.lang.String) +org.apache.http.entity.ContentType#create(java.lang.String,java.nio.charset.Charset) +org.apache.http.entity.ContentType#create(java.lang.String,org.apache.http.NameValuePair[]) diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java index f18e348adce5e..f7996bec924ef 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java @@ -278,7 +278,7 @@ public void testIndex() throws IOException { HttpEntity entity = request.entity; assertTrue(entity instanceof ByteArrayEntity); - assertEquals(indexRequest.getContentType().mediaType(), entity.getContentType().getValue()); + assertEquals(indexRequest.getContentType().mediaTypeWithoutParameters(), entity.getContentType().getValue()); try (XContentParser parser = createParser(xContentType.xContent(), entity.getContent())) { assertEquals(nbFields, parser.map().size()); } @@ -488,7 +488,7 @@ public void testBulk() throws IOException { assertEquals("/_bulk", request.endpoint); assertEquals(expectedParams, request.params); assertEquals("POST", request.method); - assertEquals(xContentType.mediaType(), request.entity.getContentType().getValue()); + assertEquals(xContentType.mediaTypeWithoutParameters(), request.entity.getContentType().getValue()); byte[] content = new byte[(int) request.entity.getContentLength()]; try (InputStream inputStream = request.entity.getContent()) { Streams.readFully(inputStream, content); @@ -541,7 +541,7 @@ public void testBulkWithDifferentContentTypes() throws IOException { bulkRequest.add(new DeleteRequest("index", "type", "2")); 
Request request = Request.bulk(bulkRequest); - assertEquals(XContentType.JSON.mediaType(), request.entity.getContentType().getValue()); + assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), request.entity.getContentType().getValue()); } { XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE); @@ -551,7 +551,7 @@ public void testBulkWithDifferentContentTypes() throws IOException { bulkRequest.add(new DeleteRequest("index", "type", "2")); Request request = Request.bulk(bulkRequest); - assertEquals(xContentType.mediaType(), request.entity.getContentType().getValue()); + assertEquals(xContentType.mediaTypeWithoutParameters(), request.entity.getContentType().getValue()); } { XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE); @@ -563,7 +563,7 @@ public void testBulkWithDifferentContentTypes() throws IOException { } Request request = Request.bulk(new BulkRequest().add(updateRequest)); - assertEquals(xContentType.mediaType(), request.entity.getContentType().getValue()); + assertEquals(xContentType.mediaTypeWithoutParameters(), request.entity.getContentType().getValue()); } { BulkRequest bulkRequest = new BulkRequest(); @@ -732,7 +732,7 @@ public void testSearchScroll() throws IOException { assertEquals("/_search/scroll", request.endpoint); assertEquals(0, request.params.size()); assertToXContentBody(searchScrollRequest, request.entity); - assertEquals(Request.REQUEST_BODY_CONTENT_TYPE.mediaType(), request.entity.getContentType().getValue()); + assertEquals(Request.REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.entity.getContentType().getValue()); } public void testClearScroll() throws IOException { @@ -746,12 +746,12 @@ public void testClearScroll() throws IOException { assertEquals("/_search/scroll", request.endpoint); assertEquals(0, request.params.size()); assertToXContentBody(clearScrollRequest, request.entity); - assertEquals(Request.REQUEST_BODY_CONTENT_TYPE.mediaType(), 
request.entity.getContentType().getValue()); + assertEquals(Request.REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.entity.getContentType().getValue()); } private static void assertToXContentBody(ToXContent expectedBody, HttpEntity actualEntity) throws IOException { BytesReference expectedBytes = XContentHelper.toXContent(expectedBody, Request.REQUEST_BODY_CONTENT_TYPE, false); - assertEquals(XContentType.JSON.mediaType(), actualEntity.getContentType().getValue()); + assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), actualEntity.getContentType().getValue()); assertEquals(expectedBytes, new BytesArray(EntityUtils.toByteArray(actualEntity))); } @@ -793,6 +793,11 @@ public void testEndpoint() { assertEquals("/a/_create", Request.endpoint("a", null, null, "_create")); } + public void testCreateContentType() { + final XContentType xContentType = randomFrom(XContentType.values()); + assertEquals(xContentType.mediaTypeWithoutParameters(), Request.createContentType(xContentType).getMimeType()); + } + public void testEnforceSameContentType() { XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE); IndexRequest indexRequest = new IndexRequest().source(singletonMap("field", "value"), xContentType); diff --git a/core/src/main/java/org/elasticsearch/Build.java b/core/src/main/java/org/elasticsearch/Build.java index bef9fafe3ca70..7e46b340dfc01 100644 --- a/core/src/main/java/org/elasticsearch/Build.java +++ b/core/src/main/java/org/elasticsearch/Build.java @@ -19,6 +19,7 @@ package org.elasticsearch; +import org.elasticsearch.common.Booleans; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -59,7 +60,18 @@ public class Build { // not running from the official elasticsearch jar file (unit tests, IDE, uber client jar, shadiness) shortHash = "Unknown"; date = "Unknown"; - isSnapshot = true; + final String 
buildSnapshot = System.getProperty("build.snapshot"); + if (buildSnapshot != null) { + try { + Class.forName("com.carrotsearch.randomizedtesting.RandomizedContext"); + } catch (final ClassNotFoundException e) { + // we are not in tests but build.snapshot is set, bail hard + throw new IllegalStateException("build.snapshot set to [" + buildSnapshot + "] but not running tests"); + } + isSnapshot = Booleans.parseBoolean(buildSnapshot); + } else { + isSnapshot = true; + } } if (shortHash == null) { throw new IllegalStateException("Error finding the build shortHash. " + diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java index 935278044c483..b422345aa426c 100644 --- a/core/src/main/java/org/elasticsearch/Version.java +++ b/core/src/main/java/org/elasticsearch/Version.java @@ -92,6 +92,9 @@ public class Version implements Comparable { public static final Version V_5_5_3 = new Version(V_5_5_3_ID, org.apache.lucene.util.Version.LUCENE_6_6_0); public static final int V_5_6_0_ID = 5060099; public static final Version V_5_6_0 = new Version(V_5_6_0_ID, org.apache.lucene.util.Version.LUCENE_6_6_0); + public static final int V_5_6_1_ID = 5060199; + // use proper Lucene constant once we are on a Lucene snapshot that knows about 6.6.1 + public static final Version V_5_6_1 = new Version(V_5_6_1_ID, org.apache.lucene.util.Version.fromBits(6, 6, 1)); public static final int V_6_0_0_alpha1_ID = 6000001; public static final Version V_6_0_0_alpha1 = new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_0_0); @@ -142,6 +145,8 @@ public static Version fromId(int id) { return V_6_0_0_alpha2; case V_6_0_0_alpha1_ID: return V_6_0_0_alpha1; + case V_5_6_1_ID: + return V_5_6_1; case V_5_6_0_ID: return V_5_6_0; case V_5_5_3_ID: diff --git a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java index 6b1cf09bd736a..fa3b16ef0d579 100644 --- 
a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java +++ b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java @@ -33,7 +33,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.seqno.SequenceNumbersService; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; @@ -176,7 +176,7 @@ public long getVersion() { } /** - * Returns the sequence number assigned for this change. Returns {@link SequenceNumbersService#UNASSIGNED_SEQ_NO} if the operation + * Returns the sequence number assigned for this change. Returns {@link SequenceNumbers#UNASSIGNED_SEQ_NO} if the operation * wasn't performed (i.e., an update operation that resulted in a NOOP). */ public long getSeqNo() { @@ -263,7 +263,7 @@ public void readFrom(StreamInput in) throws IOException { seqNo = in.readZLong(); primaryTerm = in.readVLong(); } else { - seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO; + seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; primaryTerm = 0; } forcedRefresh = in.readBoolean(); @@ -375,7 +375,7 @@ public abstract static class Builder { protected Result result = null; protected boolean forcedRefresh; protected ShardInfo shardInfo = null; - protected Long seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO; + protected Long seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; protected Long primaryTerm = 0L; public ShardId getShardId() { diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java index bb8bcda2003a2..6aaece4c986f2 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java +++ 
b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java @@ -94,7 +94,7 @@ protected void masterOperation(final ClusterAllocationExplainRequest request, fi final RoutingNodes routingNodes = state.getRoutingNodes(); final ClusterInfo clusterInfo = clusterInfoService.getClusterInfo(); final RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, state, - clusterInfo, System.nanoTime(), false); + clusterInfo, System.nanoTime()); ShardRouting shardRouting = findShardToExplain(request, allocation); logger.debug("explaining the allocation for [{}], found shard [{}]", request, shardRouting); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java index 1c4cafc8d60e4..6fd2fd2da848b 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java @@ -37,7 +37,7 @@ import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.seqno.SequenceNumbersService; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.rest.RestStatus; import java.io.IOException; @@ -182,15 +182,15 @@ public static class Failure implements Writeable, ToXContentFragment { * to record operation sequence no with failure */ public Failure(String index, String type, String id, Exception cause) { - this(index, type, id, cause, ExceptionsHelper.status(cause), SequenceNumbersService.UNASSIGNED_SEQ_NO, false); + this(index, type, id, cause, ExceptionsHelper.status(cause), SequenceNumbers.UNASSIGNED_SEQ_NO, false); } public Failure(String index, String type, String id, Exception cause, boolean aborted) { - this(index, type, id, cause, 
ExceptionsHelper.status(cause), SequenceNumbersService.UNASSIGNED_SEQ_NO, aborted); + this(index, type, id, cause, ExceptionsHelper.status(cause), SequenceNumbers.UNASSIGNED_SEQ_NO, aborted); } public Failure(String index, String type, String id, Exception cause, RestStatus status) { - this(index, type, id, cause, status, SequenceNumbersService.UNASSIGNED_SEQ_NO, false); + this(index, type, id, cause, status, SequenceNumbers.UNASSIGNED_SEQ_NO, false); } /** For write failures after operation was assigned a sequence number. */ @@ -220,7 +220,7 @@ public Failure(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) { seqNo = in.readZLong(); } else { - seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO; + seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; } if (supportsAbortedFlag(in.getVersion())) { aborted = in.readBoolean(); @@ -292,7 +292,7 @@ public Exception getCause() { /** * The operation sequence number generated by primary - * NOTE: {@link SequenceNumbersService#UNASSIGNED_SEQ_NO} + * NOTE: {@link SequenceNumbers#UNASSIGNED_SEQ_NO} * indicates sequence number was not generated by primary */ public long getSeqNo() { diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index c975e6412fe49..e66df2b0d9267 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -56,7 +56,7 @@ import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.SourceToParse; -import org.elasticsearch.index.seqno.SequenceNumbersService; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; @@ -275,7 +275,7 @@ 
private static boolean isConflictException(final Exception e) { static BulkItemResultHolder processUpdateResponse(final UpdateRequest updateRequest, final String concreteIndex, final Engine.Result result, final UpdateHelper.Result translate, final IndexShard primary, final int bulkReqId) throws Exception { - assert result.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO : "failed result should not have a sequence number"; + assert result.getSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO : "failed result should not have a sequence number"; Engine.Operation.TYPE opType = result.getOperationType(); @@ -344,7 +344,7 @@ static BulkItemResultHolder executeUpdateRequestOnce(UpdateRequest updateRequest } catch (Exception failure) { // we may fail translating a update to index or delete operation // we use index result to communicate failure while translating update request - final Engine.Result result = new Engine.IndexResult(failure, updateRequest.version(), SequenceNumbersService.UNASSIGNED_SEQ_NO); + final Engine.Result result = new Engine.IndexResult(failure, updateRequest.version(), SequenceNumbers.UNASSIGNED_SEQ_NO); return new BulkItemResultHolder(null, result, primaryItemRequest); } @@ -446,7 +446,7 @@ static ReplicaItemExecutionMode replicaItemExecutionMode(final BulkItemRequest r final BulkItemResponse primaryResponse = request.getPrimaryResponse(); assert primaryResponse != null : "expected primary response to be set for item [" + index + "] request [" + request.request() + "]"; if (primaryResponse.isFailed()) { - return primaryResponse.getFailure().getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO + return primaryResponse.getFailure().getSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO ? 
ReplicaItemExecutionMode.FAILURE // we have a seq no generated with the failure, replicate as no-op : ReplicaItemExecutionMode.NOOP; // no seq no generated, ignore replication } else { @@ -485,7 +485,7 @@ public static Translog.Location performOnReplica(BulkShardRequest request, Index break; case FAILURE: final BulkItemResponse.Failure failure = item.getPrimaryResponse().getFailure(); - assert failure.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO : "seq no must be assigned"; + assert failure.getSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO : "seq no must be assigned"; operationResult = replica.markSeqNoAsNoop(failure.getSeqNo(), failure.getMessage()); assert operationResult != null : "operation result must never be null when primary response has no failure"; location = syncOperationResultOrThrow(operationResult, location); diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index 2ec84e35d1792..1a96a159179c4 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -53,6 +53,7 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.seqno.SequenceNumbersService; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardState; @@ -1035,7 +1036,7 @@ public ReplicaResponse(long localCheckpoint) { * checkpoint value when simulating responses to replication actions that pre-6.0 nodes are not aware of (e.g., the global * checkpoint background sync, and the primary/replica resync). 
*/ - assert localCheckpoint != SequenceNumbersService.UNASSIGNED_SEQ_NO; + assert localCheckpoint != SequenceNumbers.UNASSIGNED_SEQ_NO; this.localCheckpoint = localCheckpoint; } @@ -1235,7 +1236,7 @@ public void readFrom(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) { globalCheckpoint = in.readZLong(); } else { - globalCheckpoint = SequenceNumbersService.UNASSIGNED_SEQ_NO; + globalCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; } } diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateResponse.java b/core/src/main/java/org/elasticsearch/action/update/UpdateResponse.java index 672b190d91130..9e33e62622a0e 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateResponse.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateResponse.java @@ -25,7 +25,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.get.GetResult; -import org.elasticsearch.index.seqno.SequenceNumbersService; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; @@ -47,7 +47,7 @@ public UpdateResponse() { * For example: update script with operation set to none */ public UpdateResponse(ShardId shardId, String type, String id, long version, Result result) { - this(new ShardInfo(0, 0), shardId, type, id, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, version, result); + this(new ShardInfo(0, 0), shardId, type, id, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, version, result); } public UpdateResponse( diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Security.java b/core/src/main/java/org/elasticsearch/bootstrap/Security.java index e5c326b8ee174..a1ce20a0e27c8 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Security.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Security.java @@ -19,7 +19,9 @@ package 
org.elasticsearch.bootstrap; +import org.elasticsearch.Build; import org.elasticsearch.SecureSM; +import org.elasticsearch.Version; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.network.NetworkModule; @@ -43,10 +45,12 @@ import java.security.Permissions; import java.security.Policy; import java.security.URIParameter; +import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashSet; +import java.util.List; import java.util.Map; import java.util.Set; @@ -191,6 +195,7 @@ static Map getPluginPermissions(Environment environment) throws I @SuppressForbidden(reason = "accesses fully qualified URLs to configure security") static Policy readPolicy(URL policyFile, Set codebases) { try { + List propertiesSet = new ArrayList<>(); try { // set codebase properties for (URL url : codebases) { @@ -198,7 +203,22 @@ static Policy readPolicy(URL policyFile, Set codebases) { if (shortName.endsWith(".jar") == false) { continue; // tests :( } - String previous = System.setProperty("codebase." + shortName, url.toString()); + String property = "codebase." + shortName; + if (shortName.startsWith("elasticsearch-rest-client")) { + // The rest client is currently the only example where we have an elasticsearch built artifact + // which needs special permissions in policy files when used. This temporary solution is to + // pass in an extra system property that omits the -version.jar suffix the other properties have. + // That allows the snapshots to reference snapshot builds of the client, and release builds to + // referenced release builds of the client, all with the same grant statements. + final String esVersion = Version.CURRENT + (Build.CURRENT.isSnapshot() ? 
"-SNAPSHOT" : ""); + final int index = property.indexOf("-" + esVersion + ".jar"); + assert index >= 0; + String restClientAlias = property.substring(0, index); + propertiesSet.add(restClientAlias); + System.setProperty(restClientAlias, url.toString()); + } + propertiesSet.add(property); + String previous = System.setProperty(property, url.toString()); if (previous != null) { throw new IllegalStateException("codebase property already set: " + shortName + "->" + previous); } @@ -206,12 +226,8 @@ static Policy readPolicy(URL policyFile, Set codebases) { return Policy.getInstance("JavaPolicy", new URIParameter(policyFile.toURI())); } finally { // clear codebase properties - for (URL url : codebases) { - String shortName = PathUtils.get(url.toURI()).getFileName().toString(); - if (shortName.endsWith(".jar") == false) { - continue; // tests :( - } - System.clearProperty("codebase." + shortName); + for (String property : propertiesSet) { + System.clearProperty(property); } } } catch (NoSuchAlgorithmException | URISyntaxException e) { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java b/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java index 4b3f254c9f5fc..296eca476a6c5 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java @@ -46,7 +46,7 @@ public class OperationRouting extends AbstractComponent { public static final Setting USE_ADAPTIVE_REPLICA_SELECTION_SETTING = - Setting.boolSetting("cluster.routing.use_adaptive_replica_selection", false, + Setting.boolSetting("cluster.routing.use_adaptive_replica_selection", true, Setting.Property.Dynamic, Setting.Property.NodeScope); private String[] awarenessAttributes; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java b/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java index 
a28be6e38c38e..cba9acbbbb9fa 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java @@ -20,6 +20,7 @@ package org.elasticsearch.cluster.routing; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.routing.allocation.decider.Decision; @@ -113,7 +114,11 @@ public enum Reason { /** * Unassigned after forcing an empty primary */ - FORCED_EMPTY_PRIMARY + FORCED_EMPTY_PRIMARY, + /** + * Forced manually to allocate + */ + MANUAL_ALLOCATION } /** @@ -262,7 +267,11 @@ public UnassignedInfo(StreamInput in) throws IOException { } public void writeTo(StreamOutput out) throws IOException { - out.writeByte((byte) reason.ordinal()); + if (out.getVersion().before(Version.V_6_0_0_beta2) && reason == Reason.MANUAL_ALLOCATION) { + out.writeByte((byte) Reason.ALLOCATION_FAILED.ordinal()); + } else { + out.writeByte((byte) reason.ordinal()); + } out.writeLong(unassignedTimeMillis); // Do not serialize unassignedTimeNanos as System.nanoTime() cannot be compared across different JVMs out.writeBoolean(delayed); diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index 6b0f8bfba2af4..774e4b9301ca4 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -97,7 +97,7 @@ public ClusterState applyStartedShards(ClusterState clusterState, List(startedShards); Collections.sort(startedShards, Comparator.comparing(ShardRouting::primary)); @@ -164,7 +164,7 @@ public ClusterState applyFailedShards(final ClusterState clusterState, final Lis 
routingNodes.unassigned().shuffle(); long currentNanoTime = currentNanoTime(); RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, tmpState, - clusterInfoService.getClusterInfo(), currentNanoTime, false); + clusterInfoService.getClusterInfo(), currentNanoTime); for (FailedShard failedShardEntry : failedShards) { ShardRouting shardToFail = failedShardEntry.getRoutingEntry(); @@ -202,7 +202,7 @@ public ClusterState deassociateDeadNodes(final ClusterState clusterState, boolea // shuffle the unassigned nodes, just so we won't have things like poison failed shards routingNodes.unassigned().shuffle(); RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState, - clusterInfoService.getClusterInfo(), currentNanoTime(), false); + clusterInfoService.getClusterInfo(), currentNanoTime()); // first, clear from the shards any node id they used to belong to that is now dead deassociateDeadNodes(allocation); @@ -239,6 +239,22 @@ private void removeDelayMarkers(RoutingAllocation allocation) { } } + /** + * Reset failed allocation counter for unassigned shards + */ + private void resetFailedAllocationCounter(RoutingAllocation allocation) { + final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = allocation.routingNodes().unassigned().iterator(); + while (unassignedIterator.hasNext()) { + ShardRouting shardRouting = unassignedIterator.next(); + UnassignedInfo unassignedInfo = shardRouting.unassignedInfo(); + unassignedIterator.updateUnassigned(new UnassignedInfo(unassignedInfo.getNumFailedAllocations() > 0 ? 
+ UnassignedInfo.Reason.MANUAL_ALLOCATION : unassignedInfo.getReason(), unassignedInfo.getMessage(), + unassignedInfo.getFailure(), 0, unassignedInfo.getUnassignedTimeInNanos(), + unassignedInfo.getUnassignedTimeInMillis(), unassignedInfo.isDelayed(), + unassignedInfo.getLastAllocationStatus()), shardRouting.recoverySource(), allocation.changes()); + } + } + /** * Internal helper to cap the number of elements in a potentially long list for logging. * @@ -262,7 +278,7 @@ public CommandsResult reroute(final ClusterState clusterState, AllocationCommand // a consistent result of the effect the commands have on the routing // this allows systems to dry run the commands, see the resulting cluster state, and act on it RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState, - clusterInfoService.getClusterInfo(), currentNanoTime(), retryFailed); + clusterInfoService.getClusterInfo(), currentNanoTime()); // don't short circuit deciders, we want a full explanation allocation.debugDecision(true); // we ignore disable allocation, because commands are explicit @@ -272,6 +288,10 @@ public CommandsResult reroute(final ClusterState clusterState, AllocationCommand allocation.ignoreDisable(false); // the assumption is that commands will move / act on shards (or fail through exceptions) // so, there will always be shard "movements", so no need to check on reroute + + if (retryFailed) { + resetFailedAllocationCounter(allocation); + } reroute(allocation); return new CommandsResult(explanations, buildResultAndLogHealthChange(clusterState, allocation, "reroute commands")); } @@ -296,7 +316,7 @@ protected ClusterState reroute(final ClusterState clusterState, String reason, b // shuffle the unassigned nodes, just so we won't have things like poison failed shards routingNodes.unassigned().shuffle(); RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState, - clusterInfoService.getClusterInfo(), 
currentNanoTime(), false); + clusterInfoService.getClusterInfo(), currentNanoTime()); allocation.debugDecision(debug); reroute(allocation); if (allocation.routingNodesChanged() == false) { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java index e1ae367bebf76..abc363931c1e4 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java @@ -66,8 +66,6 @@ public class RoutingAllocation { private boolean ignoreDisable = false; - private final boolean retryFailed; - private DebugMode debugDecision = DebugMode.OFF; private boolean hasPendingAsyncFetch = false; @@ -90,7 +88,7 @@ public class RoutingAllocation { * @param currentNanoTime the nano time to use for all delay allocation calculation (typically {@link System#nanoTime()}) */ public RoutingAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, ClusterInfo clusterInfo, - long currentNanoTime, boolean retryFailed) { + long currentNanoTime) { this.deciders = deciders; this.routingNodes = routingNodes; this.metaData = clusterState.metaData(); @@ -99,7 +97,6 @@ public RoutingAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, this.customs = clusterState.customs(); this.clusterInfo = clusterInfo; this.currentNanoTime = currentNanoTime; - this.retryFailed = retryFailed; } /** returns the nano time captured at the beginning of the allocation. 
used to make sure all time based decisions are aligned */ @@ -285,10 +282,6 @@ public void setHasPendingAsyncFetch() { this.hasPendingAsyncFetch = true; } - public boolean isRetryFailed() { - return retryFailed; - } - public enum DebugMode { /** * debug mode is off diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java index 59a836abece37..c3817b429bbf3 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java @@ -34,7 +34,6 @@ * Note: This allocation decider also allows allocation of repeatedly failing shards when the /_cluster/reroute?retry_failed=true * API is manually invoked. This allows single retries without raising the limits. * - * @see RoutingAllocation#isRetryFailed() */ public class MaxRetryAllocationDecider extends AllocationDecider { @@ -59,14 +58,7 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocat if (unassignedInfo != null && unassignedInfo.getNumFailedAllocations() > 0) { final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index()); final int maxRetry = SETTING_ALLOCATION_MAX_RETRY.get(indexMetaData.getSettings()); - if (allocation.isRetryFailed()) { // manual allocation - retry - // if we are called via the _reroute API we ignore the failure counter and try to allocate - // this improves the usability since people don't need to raise the limits to issue retries since a simple _reroute call is - // enough to manually retry. 
- decision = allocation.decision(Decision.YES, NAME, "shard has exceeded the maximum number of retries [%d] on " + - "failed allocation attempts - retrying once due to a manual reroute command, [%s]", - maxRetry, unassignedInfo.toString()); - } else if (unassignedInfo.getNumFailedAllocations() >= maxRetry) { + if (unassignedInfo.getNumFailedAllocations() >= maxRetry) { decision = allocation.decision(Decision.NO, NAME, "shard has exceeded the maximum number of retries [%d] on " + "failed allocation attempts - manually call [/_cluster/reroute?retry_failed=true] to retry, [%s]", maxRetry, unassignedInfo.toString()); diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java index 52a5184032484..898294264274e 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java @@ -79,7 +79,8 @@ private Decision isVersionCompatible(final RoutingNodes routingNodes, final Stri return allocation.decision(Decision.YES, NAME, "target node version [%s] is the same or newer than source node version [%s]", target.node().getVersion(), source.node().getVersion()); } else { - return allocation.decision(Decision.NO, NAME, "target node version [%s] is older than the source node version [%s]", + return allocation.decision(Decision.NO, NAME, "target node version [%s] is older than the source node version [%s] and may " + + "not support codecs or postings formats for a newer Lucene version", target.node().getVersion(), source.node().getVersion()); } } @@ -90,7 +91,8 @@ private Decision isVersionCompatible(SnapshotRecoverySource recoverySource, fina return allocation.decision(Decision.YES, NAME, "target node version [%s] is the same or newer than snapshot version 
[%s]", target.node().getVersion(), recoverySource.version()); } else { - return allocation.decision(Decision.NO, NAME, "target node version [%s] is older than the snapshot version [%s]", + return allocation.decision(Decision.NO, NAME, "target node version [%s] is older than the snapshot version [%s] and may " + + "not support codecs or postings formats for a newer Lucene version", target.node().getVersion(), recoverySource.version()); } } diff --git a/core/src/main/java/org/elasticsearch/common/LegacyTimeBasedUUIDGenerator.java b/core/src/main/java/org/elasticsearch/common/LegacyTimeBasedUUIDGenerator.java index 2bf19f1dcbb97..74a08711042f7 100644 --- a/core/src/main/java/org/elasticsearch/common/LegacyTimeBasedUUIDGenerator.java +++ b/core/src/main/java/org/elasticsearch/common/LegacyTimeBasedUUIDGenerator.java @@ -23,8 +23,9 @@ import java.util.concurrent.atomic.AtomicInteger; /** - * These are essentially flake ids (http://boundary.com/blog/2012/01/12/flake-a-decentralized-k-ordered-unique-id-generator-in-erlang) but - * we use 6 (not 8) bytes for timestamp, and use 3 (not 2) bytes for sequence number. + * These are essentially flake ids, but we use 6 (not 8) bytes for timestamp, and use 3 (not 2) bytes for sequence number. 
+ * For more information about flake ids, check out + * https://archive.fo/2015.07.08-082503/http://www.boundary.com/blog/2012/01/flake-a-decentralized-k-ordered-unique-id-generator-in-erlang/ */ class LegacyTimeBasedUUIDGenerator implements UUIDGenerator { diff --git a/core/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java b/core/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java index 550559eac9f6b..c30a8d0aaa222 100644 --- a/core/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java +++ b/core/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java @@ -22,9 +22,13 @@ import java.util.Base64; import java.util.concurrent.atomic.AtomicInteger; -/** These are essentially flake ids (http://boundary.com/blog/2012/01/12/flake-a-decentralized-k-ordered-unique-id-generator-in-erlang) but - * we use 6 (not 8) bytes for timestamp, and use 3 (not 2) bytes for sequence number. We also reorder bytes in a way that does not make ids - * sort in order anymore, but is more friendly to the way that the Lucene terms dictionary is structured. */ +/** + * These are essentially flake ids but we use 6 (not 8) bytes for timestamp, and use 3 (not 2) bytes for sequence number. We also reorder + * bytes in a way that does not make ids sort in order anymore, but is more friendly to the way that the Lucene terms dictionary is + * structured. 
+ * For more information about flake ids, check out + * https://archive.fo/2015.07.08-082503/http://www.boundary.com/blog/2012/01/flake-a-decentralized-k-ordered-unique-id-generator-in-erlang/ + */ class TimeBasedUUIDGenerator implements UUIDGenerator { diff --git a/core/src/main/java/org/elasticsearch/common/cache/Cache.java b/core/src/main/java/org/elasticsearch/common/cache/Cache.java index df30123c35b42..91d011ba03cad 100644 --- a/core/src/main/java/org/elasticsearch/common/cache/Cache.java +++ b/core/src/main/java/org/elasticsearch/common/cache/Cache.java @@ -34,6 +34,7 @@ import java.util.concurrent.locks.ReentrantLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.BiFunction; +import java.util.function.Consumer; import java.util.function.Predicate; import java.util.function.ToLongBiFunction; @@ -195,14 +196,15 @@ private static class CacheSegment { /** * get an entry from the segment; expired entries will be returned as null but not removed from the cache until the LRU list is - * pruned or a manual {@link Cache#refresh()} is performed + * pruned or a manual {@link Cache#refresh()} is performed however a caller can take action using the provided callback * * @param key the key of the entry to get from the cache * @param now the access time of this entry * @param isExpired test if the entry is expired + * @param onExpiration a callback if the entry associated to the key is expired * @return the entry if there was one, otherwise null */ - Entry get(K key, long now, Predicate> isExpired) { + Entry get(K key, long now, Predicate> isExpired, Consumer> onExpiration) { CompletableFuture> future; Entry entry = null; try (ReleasableLock ignored = readLock.acquire()) { @@ -217,6 +219,10 @@ Entry get(K key, long now, Predicate> isExpired) { return ok; } else { segmentStats.miss(); + if (ok != null) { + assert isExpired.test(ok); + onExpiration.accept(ok); + } return null; } }).get(); @@ -330,12 +336,12 @@ void eviction() { * 
@return the value to which the specified key is mapped, or null if this map contains no mapping for the key */ public V get(K key) { - return get(key, now()); + return get(key, now(), e -> {}); } - private V get(K key, long now) { + private V get(K key, long now, Consumer> onExpiration) { CacheSegment segment = getCacheSegment(key); - Entry entry = segment.get(key, now, e -> isExpired(e, now)); + Entry entry = segment.get(key, now, e -> isExpired(e, now), onExpiration); if (entry == null) { return null; } else { @@ -360,7 +366,12 @@ private V get(K key, long now) { */ public V computeIfAbsent(K key, CacheLoader loader) throws ExecutionException { long now = now(); - V value = get(key, now); + // we have to eagerly evict expired entries or our putIfAbsent call below will fail + V value = get(key, now, e -> { + try (ReleasableLock ignored = lruLock.acquire()) { + evictEntry(e); + } + }); if (value == null) { // we need to synchronize loading of a value for a given key; however, holding the segment lock while // invoking load can lead to deadlock against another thread due to dependent key loading; therefore, we @@ -691,13 +702,18 @@ private void evict(long now) { assert lruLock.isHeldByCurrentThread(); while (tail != null && shouldPrune(tail, now)) { - CacheSegment segment = getCacheSegment(tail.key); - Entry entry = tail; - if (segment != null) { - segment.remove(tail.key); - } - delete(entry, RemovalNotification.RemovalReason.EVICTED); + evictEntry(tail); + } + } + + private void evictEntry(Entry entry) { + assert lruLock.isHeldByCurrentThread(); + + CacheSegment segment = getCacheSegment(entry.key); + if (segment != null) { + segment.remove(entry.key); } + delete(entry, RemovalNotification.RemovalReason.EVICTED); } private void delete(Entry entry, RemovalNotification.RemovalReason removalReason) { diff --git a/core/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java 
b/core/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java index 2b37c338c9a40..f8ccd827019a4 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java @@ -32,7 +32,7 @@ import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndVersion; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.VersionFieldMapper; -import org.elasticsearch.index.seqno.SequenceNumbersService; +import org.elasticsearch.index.seqno.SequenceNumbers; import java.io.IOException; @@ -138,7 +138,7 @@ DocIdAndSeqNo lookupSeqNo(BytesRef id, LeafReaderContext context) throws IOExcep if (seqNos != null && seqNos.advanceExact(docID)) { seqNo = seqNos.longValue(); } else { - seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO; + seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; } return new DocIdAndSeqNo(docID, seqNo, context); } else { diff --git a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java index 05b7d96c8f6db..fcf88cc31af5d 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java @@ -212,21 +212,31 @@ synchronized void addSettingsUpdater(SettingUpdater updater) { } /** - * Adds a settings consumer that accepts the values for two settings. The consumer if only notified if one or both settings change. + * Adds a settings consumer that accepts the values for two settings. + * See {@link #addSettingsUpdateConsumer(Setting, Setting, BiConsumer, BiConsumer)} for details. 
+ */ + public synchronized void addSettingsUpdateConsumer(Setting a, Setting b, BiConsumer consumer) { + addSettingsUpdateConsumer(a, b, consumer, (i, j) -> {} ); + } + + /** + * Adds a settings consumer that accepts the values for two settings. The consumer is only notified if one or both settings change + * and if the provided validator succeeded. *

* Note: Only settings registered in {@link SettingsModule} can be changed dynamically. *

- * This method registers a compound updater that is useful if two settings are depending on each other. The consumer is always provided - * with both values even if only one of the two changes. + * This method registers a compound updater that is useful if two settings are depending on each other. + * The consumer is always provided with both values even if only one of the two changes. */ - public synchronized void addSettingsUpdateConsumer(Setting
a, Setting b, BiConsumer consumer) { + public synchronized void addSettingsUpdateConsumer(Setting a, Setting b, + BiConsumer consumer, BiConsumer validator) { if (a != get(a.getKey())) { throw new IllegalArgumentException("Setting is not registered for key [" + a.getKey() + "]"); } if (b != get(b.getKey())) { throw new IllegalArgumentException("Setting is not registered for key [" + b.getKey() + "]"); } - addSettingsUpdater(Setting.compoundUpdater(consumer, a, b, logger)); + addSettingsUpdater(Setting.compoundUpdater(consumer, validator, a, b, logger)); } /** diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index ac582191c3587..e1374fc6033eb 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -356,6 +356,7 @@ public void apply(Settings value, Settings current, Settings previous) { UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_RESOLVE_TIMEOUT, SearchService.DEFAULT_KEEPALIVE_SETTING, SearchService.KEEPALIVE_INTERVAL_SETTING, + SearchService.MAX_KEEPALIVE_SETTING, SearchService.LOW_LEVEL_CANCELLATION_SETTING, Node.WRITE_PORTS_FILE_SETTING, Node.NODE_NAME_SETTING, diff --git a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index 1b57a2919f3b1..5a6c17bf2f0c1 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.common.settings; -import org.elasticsearch.index.IndexSortConfig; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.UnassignedInfo; import 
org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; @@ -27,6 +26,7 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexSortConfig; import org.elasticsearch.index.IndexingSlowLog; import org.elasticsearch.index.MergePolicyConfig; import org.elasticsearch.index.MergeSchedulerConfig; @@ -110,6 +110,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.INDEX_WARMER_ENABLED_SETTING, IndexSettings.INDEX_REFRESH_INTERVAL_SETTING, IndexSettings.MAX_RESULT_WINDOW_SETTING, + IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING, IndexSettings.MAX_RESCORE_WINDOW_SETTING, IndexSettings.MAX_ADJACENCY_MATRIX_FILTERS_SETTING, IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING, diff --git a/core/src/main/java/org/elasticsearch/common/settings/Setting.java b/core/src/main/java/org/elasticsearch/common/settings/Setting.java index 3b87191577f61..1f807dbc0f20f 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -479,7 +479,7 @@ AbstractScopedSettings.SettingUpdater newUpdater(Consumer consumer, Logger * See {@link AbstractScopedSettings#addSettingsUpdateConsumer(Setting, Setting, BiConsumer)} and its usage for details. 
*/ static AbstractScopedSettings.SettingUpdater> compoundUpdater(final BiConsumer consumer, - final Setting aSetting, final Setting bSetting, Logger logger) { + final BiConsumer validator, final Setting aSetting, final Setting bSetting, Logger logger) { final AbstractScopedSettings.SettingUpdater aSettingUpdater = aSetting.newUpdater(null, logger); final AbstractScopedSettings.SettingUpdater bSettingUpdater = bSetting.newUpdater(null, logger); return new AbstractScopedSettings.SettingUpdater>() { @@ -490,7 +490,10 @@ public boolean hasChanged(Settings current, Settings previous) { @Override public Tuple getValue(Settings current, Settings previous) { - return new Tuple<>(aSettingUpdater.getValue(current, previous), bSettingUpdater.getValue(current, previous)); + A valueA = aSettingUpdater.getValue(current, previous); + B valueB = bSettingUpdater.getValue(current, previous); + validator.accept(valueA, valueB); + return new Tuple<>(valueA, valueB); } @Override diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutor.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutor.java index 2d1be51824efe..8062d5510c7bf 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutor.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutor.java @@ -225,7 +225,7 @@ protected void afterExecute(Runnable r, Throwable t) { // - Since taskCount will now be incremented forever, it will never be 10 again, // so there will be no further adjustments logger.debug("[{}]: too many incoming tasks while queue size adjustment occurs, resetting measurements to 0", name); - totalTaskNanos.getAndSet(0); + totalTaskNanos.getAndSet(1); taskCount.getAndSet(0); startNs = System.nanoTime(); } else { diff --git a/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java 
b/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java index 0ecf40e65a1ba..179692cd516c8 100644 --- a/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java +++ b/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java @@ -19,6 +19,8 @@ package org.elasticsearch.discovery; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterApplier; import org.elasticsearch.cluster.service.MasterService; @@ -36,12 +38,15 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Optional; +import java.util.function.BiConsumer; import java.util.function.Function; import java.util.function.Supplier; @@ -62,7 +67,7 @@ public DiscoveryModule(Settings settings, ThreadPool threadPool, TransportServic ClusterApplier clusterApplier, ClusterSettings clusterSettings, List plugins, AllocationService allocationService) { final UnicastHostsProvider hostsProvider; - + final Collection> joinValidators = new ArrayList<>(); Map> hostProviders = new HashMap<>(); for (DiscoveryPlugin plugin : plugins) { plugin.getZenHostsProviders(transportService, networkService).entrySet().forEach(entry -> { @@ -70,6 +75,10 @@ public DiscoveryModule(Settings settings, ThreadPool threadPool, TransportServic throw new IllegalArgumentException("Cannot register zen hosts provider [" + entry.getKey() + "] twice"); } }); + BiConsumer joinValidator = plugin.getJoinValidator(); + if (joinValidator != null) { + joinValidators.add(joinValidator); + } } Optional hostsProviderName = DISCOVERY_HOSTS_PROVIDER_SETTING.get(settings); if (hostsProviderName.isPresent()) { @@ -85,7 
+94,7 @@ public DiscoveryModule(Settings settings, ThreadPool threadPool, TransportServic Map> discoveryTypes = new HashMap<>(); discoveryTypes.put("zen", () -> new ZenDiscovery(settings, threadPool, transportService, namedWriteableRegistry, masterService, clusterApplier, - clusterSettings, hostsProvider, allocationService)); + clusterSettings, hostsProvider, allocationService, Collections.unmodifiableCollection(joinValidators))); discoveryTypes.put("single-node", () -> new SingleNodeDiscovery(settings, transportService, masterService, clusterApplier)); for (DiscoveryPlugin plugin : plugins) { plugin.getDiscoveryTypes(threadPool, transportService, namedWriteableRegistry, diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java b/core/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java index 18cac5818049f..fdfcd8ac29079 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java @@ -39,7 +39,10 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; +import java.util.Collection; import java.util.concurrent.TimeUnit; +import java.util.function.BiConsumer; +import java.util.function.Supplier; public class MembershipAction extends AbstractComponent { @@ -63,7 +66,8 @@ public interface MembershipListener { private final MembershipListener listener; - public MembershipAction(Settings settings, TransportService transportService, MembershipListener listener) { + public MembershipAction(Settings settings, TransportService transportService, MembershipListener listener, + Collection> joinValidators) { super(settings); this.transportService = transportService; this.listener = listener; @@ -73,7 +77,7 @@ public MembershipAction(Settings settings, TransportService transportService, Me ThreadPool.Names.GENERIC, new JoinRequestRequestHandler()); 
transportService.registerRequestHandler(DISCOVERY_JOIN_VALIDATE_ACTION_NAME, () -> new ValidateJoinRequest(), ThreadPool.Names.GENERIC, - new ValidateJoinRequestRequestHandler()); + new ValidateJoinRequestRequestHandler(transportService::getLocalNode, joinValidators)); transportService.registerRequestHandler(DISCOVERY_LEAVE_ACTION_NAME, LeaveRequest::new, ThreadPool.Names.GENERIC, new LeaveRequestRequestHandler()); } @@ -176,12 +180,20 @@ public void writeTo(StreamOutput out) throws IOException { } static class ValidateJoinRequestRequestHandler implements TransportRequestHandler { + private final Supplier localNodeSupplier; + private final Collection> joinValidators; + + ValidateJoinRequestRequestHandler(Supplier localNodeSupplier, + Collection> joinValidators) { + this.localNodeSupplier = localNodeSupplier; + this.joinValidators = joinValidators; + } @Override public void messageReceived(ValidateJoinRequest request, TransportChannel channel) throws Exception { - ensureNodesCompatibility(Version.CURRENT, request.state.getNodes()); - ensureIndexCompatibility(Version.CURRENT, request.state.getMetaData()); - // for now, the mere fact that we can serialize the cluster state acts as validation.... 
+ DiscoveryNode node = localNodeSupplier.get(); + assert node != null : "local node is null"; + joinValidators.stream().forEach(action -> action.accept(node, request.state)); channel.sendResponse(TransportResponse.Empty.INSTANCE); } } diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index a4817fada36d2..249cce73765be 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -69,6 +69,8 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; import java.util.List; import java.util.Locale; import java.util.Set; @@ -78,6 +80,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.stream.Collectors; @@ -146,15 +149,17 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover private final NodeJoinController nodeJoinController; private final NodeRemovalClusterStateTaskExecutor nodeRemovalExecutor; - private final ClusterApplier clusterApplier; private final AtomicReference committedState; // last committed cluster state private final Object stateMutex = new Object(); + private final Collection> onJoinValidators; public ZenDiscovery(Settings settings, ThreadPool threadPool, TransportService transportService, NamedWriteableRegistry namedWriteableRegistry, MasterService masterService, ClusterApplier clusterApplier, - ClusterSettings clusterSettings, UnicastHostsProvider hostsProvider, AllocationService allocationService) { + ClusterSettings clusterSettings, UnicastHostsProvider hostsProvider, AllocationService allocationService, + Collection> onJoinValidators) { super(settings); + 
this.onJoinValidators = addBuiltInJoinValidators(onJoinValidators); this.masterService = masterService; this.clusterApplier = clusterApplier; this.transportService = transportService; @@ -211,7 +216,7 @@ public ZenDiscovery(Settings settings, ThreadPool threadPool, TransportService t namedWriteableRegistry, this, discoverySettings); - this.membership = new MembershipAction(settings, transportService, new MembershipListener()); + this.membership = new MembershipAction(settings, transportService, new MembershipListener(), onJoinValidators); this.joinThreadControl = new JoinThreadControl(); this.nodeJoinController = new NodeJoinController(masterService, allocationService, electMaster, settings); @@ -223,6 +228,17 @@ public ZenDiscovery(Settings settings, ThreadPool threadPool, TransportService t DISCOVERY_REJOIN_ACTION_NAME, RejoinClusterRequest::new, ThreadPool.Names.SAME, new RejoinClusterRequestHandler()); } + static Collection> addBuiltInJoinValidators( + Collection> onJoinValidators) { + Collection> validators = new ArrayList<>(); + validators.add((node, state) -> { + MembershipAction.ensureNodesCompatibility(node.getVersion(), state.getNodes()); + MembershipAction.ensureIndexCompatibility(node.getVersion(), state.getMetaData()); + }); + validators.addAll(onJoinValidators); + return Collections.unmodifiableCollection(validators); + } + // protected to allow overriding in tests protected ZenPing newZenPing(Settings settings, ThreadPool threadPool, TransportService transportService, UnicastHostsProvider hostsProvider) { @@ -885,8 +901,7 @@ void handleJoinRequest(final DiscoveryNode node, final ClusterState state, final } else { // we do this in a couple of places including the cluster update thread. This one here is really just best effort // to ensure we fail as fast as possible. 
- MembershipAction.ensureNodesCompatibility(node.getVersion(), state.getNodes()); - MembershipAction.ensureIndexCompatibility(node.getVersion(), state.getMetaData()); + onJoinValidators.stream().forEach(a -> a.accept(node, state)); if (state.getBlocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK) == false) { MembershipAction.ensureMajorVersionBarrier(node.getVersion(), state.getNodes().getMinNodeVersion()); } @@ -898,7 +913,8 @@ void handleJoinRequest(final DiscoveryNode node, final ClusterState state, final try { membership.sendValidateJoinRequestBlocking(node, state, joinTimeout); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to validate incoming join request from node [{}]", node), e); + logger.warn((Supplier) () -> new ParameterizedMessage("failed to validate incoming join request from node [{}]", node), + e); callback.onFailure(new IllegalStateException("failure when sending a validation request to node", e)); return; } @@ -1313,4 +1329,9 @@ public void start() { } } + + public final Collection> getOnJoinValidators() { + return onJoinValidators; + } + } diff --git a/core/src/main/java/org/elasticsearch/index/IndexSettings.java b/core/src/main/java/org/elasticsearch/index/IndexSettings.java index d5156ba68f0e1..0bb9db0130472 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/core/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -91,6 +91,13 @@ public final class IndexSettings { */ public static final Setting MAX_RESULT_WINDOW_SETTING = Setting.intSetting("index.max_result_window", 10000, 1, Property.Dynamic, Property.IndexScope); + /** + * Index setting describing the maximum value of from + size on an individual inner hit definition or + * top hits aggregation. The default maximum of 100 is defensive for the reason that the number of inner hit responses + * and number of top hits buckets returned is unbounded. Profile your cluster when increasing this setting. 
+ */ + public static final Setting MAX_INNER_RESULT_WINDOW_SETTING = + Setting.intSetting("index.max_inner_result_window", 100, 1, Property.Dynamic, Property.IndexScope); /** * Index setting describing the maximum size of the rescore window. Defaults to {@link #MAX_RESULT_WINDOW_SETTING} * because they both do the same thing: control the size of the heap of hits. @@ -211,6 +218,7 @@ public final class IndexSettings { private long gcDeletesInMillis = DEFAULT_GC_DELETES.millis(); private volatile boolean warmerEnabled; private volatile int maxResultWindow; + private volatile int maxInnerResultWindow; private volatile int maxAdjacencyMatrixFilters; private volatile int maxRescoreWindow; private volatile boolean TTLPurgeDisabled; @@ -311,6 +319,7 @@ public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSetti gcDeletesInMillis = scopedSettings.get(INDEX_GC_DELETES_SETTING).getMillis(); warmerEnabled = scopedSettings.get(INDEX_WARMER_ENABLED_SETTING); maxResultWindow = scopedSettings.get(MAX_RESULT_WINDOW_SETTING); + maxInnerResultWindow = scopedSettings.get(MAX_INNER_RESULT_WINDOW_SETTING); maxAdjacencyMatrixFilters = scopedSettings.get(MAX_ADJACENCY_MATRIX_FILTERS_SETTING); maxRescoreWindow = scopedSettings.get(MAX_RESCORE_WINDOW_SETTING); TTLPurgeDisabled = scopedSettings.get(INDEX_TTL_DISABLE_PURGE_SETTING); @@ -339,6 +348,7 @@ public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSetti scopedSettings.addSettingsUpdateConsumer(INDEX_TRANSLOG_DURABILITY_SETTING, this::setTranslogDurability); scopedSettings.addSettingsUpdateConsumer(INDEX_TTL_DISABLE_PURGE_SETTING, this::setTTLPurgeDisabled); scopedSettings.addSettingsUpdateConsumer(MAX_RESULT_WINDOW_SETTING, this::setMaxResultWindow); + scopedSettings.addSettingsUpdateConsumer(MAX_INNER_RESULT_WINDOW_SETTING, this::setMaxInnerResultWindow); scopedSettings.addSettingsUpdateConsumer(MAX_ADJACENCY_MATRIX_FILTERS_SETTING, this::setMaxAdjacencyMatrixFilters); 
scopedSettings.addSettingsUpdateConsumer(MAX_RESCORE_WINDOW_SETTING, this::setMaxRescoreWindow); scopedSettings.addSettingsUpdateConsumer(INDEX_WARMER_ENABLED_SETTING, this::setEnableWarmer); @@ -564,6 +574,17 @@ private void setMaxResultWindow(int maxResultWindow) { this.maxResultWindow = maxResultWindow; } + /** + * Returns the max result window for an individual inner hit definition or top hits aggregation. + */ + public int getMaxInnerResultWindow() { + return maxInnerResultWindow; + } + + private void setMaxInnerResultWindow(int maxInnerResultWindow) { + this.maxInnerResultWindow = maxInnerResultWindow; + } + /** * Returns the max number of filters in adjacency_matrix aggregation search requests */ diff --git a/core/src/main/java/org/elasticsearch/index/engine/Engine.java b/core/src/main/java/org/elasticsearch/index/engine/Engine.java index 9c11264e86aec..9fd46d53049f2 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -65,6 +65,7 @@ import org.elasticsearch.index.mapper.ParseContext.Document; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.merge.MergeStats; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.seqno.SequenceNumbersService; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.Store; @@ -396,7 +397,7 @@ public IndexResult(long version, long seqNo, boolean created) { * (e.g while preparing operation or updating mappings) * */ public IndexResult(Exception failure, long version) { - this(failure, version, SequenceNumbersService.UNASSIGNED_SEQ_NO); + this(failure, version, SequenceNumbers.UNASSIGNED_SEQ_NO); } public IndexResult(Exception failure, long version, long seqNo) { @@ -1046,7 +1047,7 @@ public Index(Term uid, ParsedDocument doc) { Index(Term uid, ParsedDocument doc, long version) { // use a primary term of 2 to allow tests to reduce it to a valid 
>0 term - this(uid, doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 2, version, VersionType.INTERNAL, + this(uid, doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 2, version, VersionType.INTERNAL, Origin.PRIMARY, System.nanoTime(), -1, false); } // TEST ONLY @@ -1122,7 +1123,7 @@ public Delete(String type, String id, Term uid, long seqNo, long primaryTerm, lo } public Delete(String type, String id, Term uid) { - this(type, id, uid, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, Origin.PRIMARY, System.nanoTime()); + this(type, id, uid, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, Origin.PRIMARY, System.nanoTime()); } public Delete(Delete template, VersionType versionType) { diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 5ede2ff872c21..4bd4634a8cb09 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -179,14 +179,14 @@ public InternalEngine(EngineConfig engineConfig) throws EngineException { break; case OPEN_INDEX_CREATE_TRANSLOG: writer = createWriter(false); - seqNoStats = store.loadSeqNoStats(SequenceNumbersService.UNASSIGNED_SEQ_NO); + seqNoStats = store.loadSeqNoStats(SequenceNumbers.UNASSIGNED_SEQ_NO); break; case CREATE_INDEX_AND_TRANSLOG: writer = createWriter(true); seqNoStats = new SeqNoStats( - SequenceNumbersService.NO_OPS_PERFORMED, - SequenceNumbersService.NO_OPS_PERFORMED, - SequenceNumbersService.UNASSIGNED_SEQ_NO); + SequenceNumbers.NO_OPS_PERFORMED, + SequenceNumbers.NO_OPS_PERFORMED, + SequenceNumbers.UNASSIGNED_SEQ_NO); break; default: throw new IllegalArgumentException(openMode.toString()); @@ -463,7 +463,7 @@ enum OpVsLuceneDocStatus { } private OpVsLuceneDocStatus compareOpToLuceneDocBasedOnSeqNo(final Operation op) throws IOException { - assert op.seqNo() != 
SequenceNumbersService.UNASSIGNED_SEQ_NO : "resolving ops based on seq# but no seqNo is found"; + assert op.seqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO : "resolving ops based on seq# but no seqNo is found"; final OpVsLuceneDocStatus status; final VersionValue versionValue = versionMap.getUnderLock(op.uid()); assert incrementVersionLookup(); @@ -507,7 +507,7 @@ private VersionValue resolveDocVersion(final Operation op) throws IOException { assert incrementIndexVersionLookup(); // used for asserting in tests final long currentVersion = loadCurrentVersionFromIndex(op.uid()); if (currentVersion != Versions.NOT_FOUND) { - versionValue = new VersionValue(currentVersion, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0L); + versionValue = new VersionValue(currentVersion, SequenceNumbers.UNASSIGNED_SEQ_NO, 0L); } } else if (engineConfig.isEnableGcDeletes() && versionValue.isDelete() && (engineConfig.getThreadPool().relativeTimeInMillis() - ((DeleteVersionValue)versionValue).time) > getGcDeletesInMillis()) { @@ -518,7 +518,7 @@ private VersionValue resolveDocVersion(final Operation op) throws IOException { private OpVsLuceneDocStatus compareOpToLuceneDocBasedOnVersions(final Operation op) throws IOException { - assert op.seqNo() == SequenceNumbersService.UNASSIGNED_SEQ_NO : "op is resolved based on versions but have a seq#"; + assert op.seqNo() == SequenceNumbers.UNASSIGNED_SEQ_NO : "op is resolved based on versions but have a seq#"; assert op.version() >= 0 : "versions should be non-negative. 
got " + op.version(); final VersionValue versionValue = resolveDocVersion(op); if (versionValue == null) { @@ -570,11 +570,11 @@ private boolean assertVersionType(final Engine.Operation operation) { private boolean assertIncomingSequenceNumber(final Engine.Operation.Origin origin, final long seqNo) { if (engineConfig.getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0_alpha1) && origin == Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { // legacy support - assert seqNo == SequenceNumbersService.UNASSIGNED_SEQ_NO : "old op recovering but it already has a seq no.;" + + assert seqNo == SequenceNumbers.UNASSIGNED_SEQ_NO : "old op recovering but it already has a seq no.;" + " index version: " + engineConfig.getIndexSettings().getIndexVersionCreated() + ", seqNo: " + seqNo; } else if (origin == Operation.Origin.PRIMARY) { // sequence number should not be set when operation origin is primary - assert seqNo == SequenceNumbersService.UNASSIGNED_SEQ_NO : "primary ops should never have an assigned seq no.; seqNo: " + seqNo; + assert seqNo == SequenceNumbers.UNASSIGNED_SEQ_NO : "primary ops should never have an assigned seq no.; seqNo: " + seqNo; } else if (engineConfig.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_0_0_alpha1)) { // sequence number should be set when operation origin is not primary assert seqNo >= 0 : "recovery or replica ops should have an assigned seq no.; origin: " + origin; @@ -651,7 +651,7 @@ public IndexResult index(Index index) throws IOException { final Translog.Location location; if (indexResult.hasFailure() == false) { location = translog.add(new Translog.Index(index, indexResult)); - } else if (indexResult.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) { + } else if (indexResult.getSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) { // if we have document failure, record it as a no-op in the translog with the generated seq_no location = translog.add(new Translog.NoOp(indexResult.getSeqNo(), index.primaryTerm(), 
indexResult.getFailure().getMessage())); } else { @@ -659,7 +659,7 @@ public IndexResult index(Index index) throws IOException { } indexResult.setTranslogLocation(location); } - if (indexResult.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) { + if (indexResult.getSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) { seqNoService().markSeqNoAsCompleted(indexResult.getSeqNo()); } indexResult.setTook(System.nanoTime() - index.startTime()); @@ -692,7 +692,7 @@ private IndexingStrategy planIndexingAsNonPrimary(Index index) throws IOExceptio // this allows to ignore the case where a document was found in the live version maps in // a delete state and return false for the created flag in favor of code simplicity final OpVsLuceneDocStatus opVsLucene; - if (index.seqNo() == SequenceNumbersService.UNASSIGNED_SEQ_NO) { + if (index.seqNo() == SequenceNumbers.UNASSIGNED_SEQ_NO) { // This can happen if the primary is still on an old node and send traffic without seq# or we recover from translog // created by an old version. 
assert config().getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0_alpha1) : @@ -873,7 +873,7 @@ static IndexingStrategy skipDueToVersionConflict( VersionConflictEngineException e, boolean currentNotFoundOrDeleted, long currentVersion) { final IndexResult result = new IndexResult(e, currentVersion); return new IndexingStrategy( - currentNotFoundOrDeleted, false, false, SequenceNumbersService.UNASSIGNED_SEQ_NO, Versions.NOT_FOUND, result); + currentNotFoundOrDeleted, false, false, SequenceNumbers.UNASSIGNED_SEQ_NO, Versions.NOT_FOUND, result); } static IndexingStrategy processNormally(boolean currentNotFoundOrDeleted, @@ -951,7 +951,7 @@ public DeleteResult delete(Delete delete) throws IOException { final Translog.Location location; if (deleteResult.hasFailure() == false) { location = translog.add(new Translog.Delete(delete, deleteResult)); - } else if (deleteResult.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) { + } else if (deleteResult.getSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) { location = translog.add(new Translog.NoOp(deleteResult.getSeqNo(), delete.primaryTerm(), deleteResult.getFailure().getMessage())); } else { @@ -959,7 +959,7 @@ public DeleteResult delete(Delete delete) throws IOException { } deleteResult.setTranslogLocation(location); } - if (deleteResult.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) { + if (deleteResult.getSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) { seqNoService().markSeqNoAsCompleted(deleteResult.getSeqNo()); } deleteResult.setTook(System.nanoTime() - delete.startTime()); @@ -987,7 +987,7 @@ private DeletionStrategy planDeletionAsNonPrimary(Delete delete) throws IOExcept // this allows to ignore the case where a document was found in the live version maps in // a delete state and return true for the found flag in favor of code simplicity final OpVsLuceneDocStatus opVsLucene; - if (delete.seqNo() == SequenceNumbersService.UNASSIGNED_SEQ_NO) { + if (delete.seqNo() == 
SequenceNumbers.UNASSIGNED_SEQ_NO) { assert config().getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0_alpha1) : "index is newly created but op has no sequence numbers. op: " + delete; opVsLucene = compareOpToLuceneDocBasedOnVersions(delete); @@ -1091,7 +1091,7 @@ private DeletionStrategy(boolean deleteFromLucene, boolean currentlyDeleted, static DeletionStrategy skipDueToVersionConflict( VersionConflictEngineException e, long currentVersion, boolean currentlyDeleted) { - final long unassignedSeqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO; + final long unassignedSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; final DeleteResult deleteResult = new DeleteResult(e, currentVersion, unassignedSeqNo, currentlyDeleted == false); return new DeletionStrategy(false, currentlyDeleted, unassignedSeqNo, Versions.NOT_FOUND, deleteResult); } @@ -1127,7 +1127,7 @@ public NoOpResult noOp(final NoOp noOp) { private NoOpResult innerNoOp(final NoOp noOp) throws IOException { assert readLock.isHeldByCurrentThread() || writeLock.isHeldByCurrentThread(); - assert noOp.seqNo() > SequenceNumbersService.NO_OPS_PERFORMED; + assert noOp.seqNo() > SequenceNumbers.NO_OPS_PERFORMED; final long seqNo = noOp.seqNo(); try { final NoOpResult noOpResult = new NoOpResult(noOp.seqNo()); @@ -1137,7 +1137,7 @@ private NoOpResult innerNoOp(final NoOp noOp) throws IOException { noOpResult.freeze(); return noOpResult; } finally { - if (seqNo != SequenceNumbersService.UNASSIGNED_SEQ_NO) { + if (seqNo != SequenceNumbers.UNASSIGNED_SEQ_NO) { seqNoService().markSeqNoAsCompleted(seqNo); } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java index 28dbddc3a20cc..6de6e860a8f86 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java @@ -33,6 +33,7 @@ import org.elasticsearch.Version; import 
org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.joda.DateMathParser; import org.elasticsearch.common.joda.FormatDateTimeFormatter; import org.elasticsearch.common.joda.Joda; @@ -246,7 +247,7 @@ long parse(String value) { @Override public Query termQuery(Object value, @Nullable QueryShardContext context) { - Query query = innerRangeQuery(value, value, true, true, null, null, context); + Query query = rangeQuery(value, value, true, true, ShapeRelation.INTERSECTS, null, null, context); if (boost() != 1f) { query = new BoostQuery(query, boost()); } @@ -254,20 +255,13 @@ public Query termQuery(Object value, @Nullable QueryShardContext context) { } @Override - public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, QueryShardContext context) { - failIfNotIndexed(); - return rangeQuery(lowerTerm, upperTerm, includeLower, includeUpper, null, null, context); - } - - public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, - @Nullable DateTimeZone timeZone, @Nullable DateMathParser forcedDateParser, QueryShardContext context) { - failIfNotIndexed(); - return innerRangeQuery(lowerTerm, upperTerm, includeLower, includeUpper, timeZone, forcedDateParser, context); - } - - Query innerRangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, + public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, ShapeRelation relation, @Nullable DateTimeZone timeZone, @Nullable DateMathParser forcedDateParser, QueryShardContext context) { failIfNotIndexed(); + if (relation == ShapeRelation.DISJOINT) { + throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + + "] does not support DISJOINT ranges"); + } DateMathParser parser = forcedDateParser == null ? 
dateMathParser : forcedDateParser; diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index b37367d992f21..cdf4bd07cebdf 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -175,14 +175,21 @@ private static MapperParsingException wrapInMapperParsingException(SourceToParse } private static String[] splitAndValidatePath(String fullFieldPath) { - String[] parts = fullFieldPath.split("\\."); - for (String part : parts) { - if (Strings.hasText(part) == false) { - throw new IllegalArgumentException( - "object field starting or ending with a [.] makes object resolution ambiguous: [" + fullFieldPath + "]"); + if (fullFieldPath.contains(".")) { + String[] parts = fullFieldPath.split("\\."); + for (String part : parts) { + if (Strings.hasText(part) == false) { + throw new IllegalArgumentException( + "object field starting or ending with a [.] makes object resolution ambiguous: [" + fullFieldPath + "]"); + } + } + return parts; + } else { + if (Strings.isEmpty(fullFieldPath)) { + throw new IllegalArgumentException("field name cannot be an empty string"); } + return new String[] {fullFieldPath}; } - return parts; } /** Creates a Mapping containing any dynamically added fields, or returns null if there were no dynamic mappings. 
*/ diff --git a/core/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java index 4396cba58cc28..c2923be4c74ab 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java @@ -258,9 +258,19 @@ protected void parseCreateField(ParseContext context, List field return; } for (ParseContext.Document document : context.docs()) { - final List paths = new ArrayList<>(); + final List paths = new ArrayList<>(document.getFields().size()); + String previousPath = ""; // used as a sentinel - field names can't be empty for (IndexableField field : document.getFields()) { - paths.add(field.name()); + final String path = field.name(); + if (path.equals(previousPath)) { + // Sometimes mappers create multiple Lucene fields, eg. one for indexing, + // one for doc values and one for storing. Deduplicating is not required + // for correctness but this simple check helps save utf-8 conversions and + // gives Lucene fewer values to deal with. 
+ continue; + } + paths.add(path); + previousPath = path; } for (String path : paths) { for (String fieldName : extractFieldNames(path)) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java index cdc81d4c225ed..faa486dd972b2 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java @@ -120,7 +120,7 @@ public Mapper.Builder parse(String name, Map node, ParserCo } } - public static final class IpFieldType extends MappedFieldType { + public static final class IpFieldType extends SimpleMappedFieldType { public IpFieldType() { super(); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java b/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java index cf0544a22dcae..2796a5342c1a5 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java @@ -35,6 +35,7 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.joda.DateMathParser; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.index.analysis.NamedAnalyzer; @@ -347,7 +348,15 @@ public Query termsQuery(List values, @Nullable QueryShardContext context) { return new ConstantScoreQuery(builder.build()); } - public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, QueryShardContext context) { + /** + * Factory method for range queries. 
+ * @param relation the relation, nulls should be interpreted like INTERSECTS + */ + public Query rangeQuery( + Object lowerTerm, Object upperTerm, + boolean includeLower, boolean includeUpper, + ShapeRelation relation, DateTimeZone timeZone, DateMathParser parser, + QueryShardContext context) { throw new IllegalArgumentException("Field [" + name + "] of type [" + typeName() + "] does not support range queries"); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 3a91d0e42ad73..c9851ed7a1d05 100755 --- a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -148,7 +148,18 @@ public MapperService(IndexSettings indexSettings, IndexAnalyzers indexAnalyzers, this.searchQuoteAnalyzer = new MapperAnalyzerWrapper(indexAnalyzers.getDefaultSearchQuoteAnalyzer(), p -> p.searchQuoteAnalyzer()); this.mapperRegistry = mapperRegistry; - this.dynamic = this.indexSettings.getValue(INDEX_MAPPER_DYNAMIC_SETTING); + if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_6_0_0_alpha1)) { + if (INDEX_MAPPER_DYNAMIC_SETTING.exists(indexSettings.getSettings())) { + if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_7_0_0_alpha1)) { + throw new IllegalArgumentException("Setting " + INDEX_MAPPER_DYNAMIC_SETTING.getKey() + " was removed after version 6.0.0"); + } else { + DEPRECATION_LOGGER.deprecated("Setting " + INDEX_MAPPER_DYNAMIC_SETTING.getKey() + " is deprecated since indices may not have more than one type anymore."); + } + } + this.dynamic = INDEX_MAPPER_DYNAMIC_DEFAULT; + } else { + this.dynamic = this.indexSettings.getValue(INDEX_MAPPER_DYNAMIC_SETTING); + } defaultMappingSource = "{\"_default_\":{}}"; if (logger.isTraceEnabled()) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java 
b/core/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index 84c05425aee7d..6f5190b637640 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -44,7 +44,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; -import org.elasticsearch.common.xcontent.support.AbstractXContentParser; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; @@ -836,7 +835,7 @@ private static double objectToDouble(Object value) { } } - public static final class NumberFieldType extends MappedFieldType { + public static final class NumberFieldType extends SimpleMappedFieldType { NumberType type; diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java index d621deed2a164..cee3a597e2b91 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java @@ -162,7 +162,7 @@ public Mapper.Builder parse(String name, Map node, } } - public static final class ScaledFloatFieldType extends MappedFieldType { + public static final class ScaledFloatFieldType extends SimpleMappedFieldType { private double scalingFactor; diff --git a/core/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java index ec5b718a94b97..bcf901388f13b 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java @@ -35,7 
+35,7 @@ import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; import org.elasticsearch.index.mapper.ParseContext.Document; import org.elasticsearch.index.query.QueryShardContext; -import org.elasticsearch.index.seqno.SequenceNumbersService; +import org.elasticsearch.index.seqno.SequenceNumbers; import java.io.IOException; import java.util.List; @@ -78,8 +78,8 @@ public SequenceIDFields(Field seqNo, Field seqNoDocValue, Field primaryTerm) { } public static SequenceIDFields emptySeqID() { - return new SequenceIDFields(new LongPoint(NAME, SequenceNumbersService.UNASSIGNED_SEQ_NO), - new NumericDocValuesField(NAME, SequenceNumbersService.UNASSIGNED_SEQ_NO), + return new SequenceIDFields(new LongPoint(NAME, SequenceNumbers.UNASSIGNED_SEQ_NO), + new NumericDocValuesField(NAME, SequenceNumbers.UNASSIGNED_SEQ_NO), new NumericDocValuesField(PRIMARY_TERM_NAME, 0)); } } @@ -126,7 +126,7 @@ public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext c } } - static final class SeqNoFieldType extends MappedFieldType { + static final class SeqNoFieldType extends SimpleMappedFieldType { SeqNoFieldType() { } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/SimpleMappedFieldType.java b/core/src/main/java/org/elasticsearch/index/mapper/SimpleMappedFieldType.java new file mode 100644 index 0000000000000..b91be82cd6b26 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/index/mapper/SimpleMappedFieldType.java @@ -0,0 +1,63 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.search.Query; +import org.elasticsearch.common.geo.ShapeRelation; +import org.elasticsearch.common.joda.DateMathParser; +import org.elasticsearch.index.query.QueryShardContext; +import org.joda.time.DateTimeZone; + +/** + * {@link MappedFieldType} base impl for field types that are neither dates nor ranges. + */ +public abstract class SimpleMappedFieldType extends MappedFieldType { + + protected SimpleMappedFieldType() { + super(); + } + + protected SimpleMappedFieldType(MappedFieldType ref) { + super(ref); + } + + @Override + public final Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, + ShapeRelation relation, DateTimeZone timeZone, DateMathParser parser, QueryShardContext context) { + if (relation == ShapeRelation.DISJOINT) { + throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + + "] does not support DISJOINT ranges"); + } + // We do not fail on non-null time zones and date parsers + // The reasoning is that on query parsers, you might want to set a time zone or format for date fields + // but then the API has no way to know which fields are dates and which fields are not dates + return rangeQuery(lowerTerm, upperTerm, includeLower, includeUpper, context); + } + + /** + * Same as {@link #rangeQuery(Object, Object, boolean, boolean, ShapeRelation, DateTimeZone, DateMathParser, QueryShardContext)} + * but without the trouble of relations or date-specific options. 
+ */ + protected Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, + QueryShardContext context) { + throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] does not support range queries"); + } + +} diff --git a/core/src/main/java/org/elasticsearch/index/mapper/TermBasedFieldType.java b/core/src/main/java/org/elasticsearch/index/mapper/TermBasedFieldType.java index ee3987399562b..9638fecb982e4 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/TermBasedFieldType.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/TermBasedFieldType.java @@ -32,7 +32,7 @@ /** Base {@link MappedFieldType} implementation for a field that is indexed * with the inverted index. */ -abstract class TermBasedFieldType extends MappedFieldType { +abstract class TermBasedFieldType extends SimpleMappedFieldType { TermBasedFieldType() {} diff --git a/core/src/main/java/org/elasticsearch/index/query/IdsQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/IdsQueryBuilder.java index 127a444b9100a..9f9b508267224 100644 --- a/core/src/main/java/org/elasticsearch/index/query/IdsQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/IdsQueryBuilder.java @@ -51,7 +51,7 @@ public class IdsQueryBuilder extends AbstractQueryBuilder { public static final String NAME = "ids"; - private static final ParseField TYPE_FIELD = new ParseField("type", "types", "_type"); + private static final ParseField TYPE_FIELD = new ParseField("type"); private static final ParseField VALUES_FIELD = new ParseField("values"); private final Set ids = new HashSet<>(); diff --git a/core/src/main/java/org/elasticsearch/index/query/InnerHitContextBuilder.java b/core/src/main/java/org/elasticsearch/index/query/InnerHitContextBuilder.java index f13aa22f7d914..58d271bb8206c 100644 --- a/core/src/main/java/org/elasticsearch/index/query/InnerHitContextBuilder.java +++ 
b/core/src/main/java/org/elasticsearch/index/query/InnerHitContextBuilder.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.query; -import org.elasticsearch.script.ScriptContext; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.script.SearchScript; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; @@ -47,8 +47,21 @@ protected InnerHitContextBuilder(QueryBuilder query, InnerHitBuilder innerHitBui this.query = query; } - public abstract void build(SearchContext parentSearchContext, - InnerHitsContext innerHitsContext) throws IOException; + public final void build(SearchContext parentSearchContext, InnerHitsContext innerHitsContext) throws IOException { + long innerResultWindow = innerHitBuilder.getFrom() + innerHitBuilder.getSize(); + int maxInnerResultWindow = parentSearchContext.mapperService().getIndexSettings().getMaxInnerResultWindow(); + if (innerResultWindow > maxInnerResultWindow) { + throw new IllegalArgumentException( + "Inner result window is too large, the inner hit definition's [" + innerHitBuilder.getName() + + "]'s from + size must be less than or equal to: [" + maxInnerResultWindow + "] but was [" + innerResultWindow + + "]. This limit can be set by changing the [" + IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING.getKey() + + "] index level setting." 
+ ); + } + doBuild(parentSearchContext, innerHitsContext); + } + + protected abstract void doBuild(SearchContext parentSearchContext, InnerHitsContext innerHitsContext) throws IOException; public static void extractInnerHits(QueryBuilder query, Map innerHitBuilders) { if (query instanceof AbstractQueryBuilder) { diff --git a/core/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java index b9037110b1c59..4e3429e1a2088 100644 --- a/core/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java @@ -336,7 +336,7 @@ static class NestedInnerHitContextBuilder extends InnerHitContextBuilder { } @Override - public void build(SearchContext parentSearchContext, + protected void doBuild(SearchContext parentSearchContext, InnerHitsContext innerHitsContext) throws IOException { QueryShardContext queryShardContext = parentSearchContext.getQueryShardContext(); ObjectMapper nestedObjectMapper = queryShardContext.getObjectMapper(path); diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/QueryBuilder.java index b6ee368b488be..f8f5b68be9afb 100644 --- a/core/src/main/java/org/elasticsearch/index/query/QueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/QueryBuilder.java @@ -83,6 +83,7 @@ public interface QueryBuilder extends NamedWriteable, ToXContentObject, Rewritea * Rewrites this query builder into its primitive form. By default this method return the builder itself. If the builder * did not change the identity reference must be returned otherwise the builder will be rewritten infinitely. 
*/ + @Override default QueryBuilder rewrite(QueryRewriteContext queryShardContext) throws IOException { return this; } diff --git a/core/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java index 0156710520da8..bdf2b6cf530df 100644 --- a/core/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java @@ -36,7 +36,6 @@ import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.FieldNamesFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; @@ -453,7 +452,7 @@ protected MappedFieldType.Relation getRelation(QueryRewriteContext queryRewriteC // no field means we have no values return MappedFieldType.Relation.DISJOINT; } else { - DateMathParser dateMathParser = format == null ? 
null : new DateMathParser(format); + DateMathParser dateMathParser = getForceDateParser(); return fieldType.isFieldWithinQuery(shardContext.getIndexReader(), from, to, includeLower, includeUpper, timeZone, dateMathParser, queryRewriteContext); } @@ -503,25 +502,10 @@ protected Query doToQuery(QueryShardContext context) throws IOException { Query query = null; MappedFieldType mapper = context.fieldMapper(this.fieldName); if (mapper != null) { - if (mapper instanceof DateFieldMapper.DateFieldType) { - - query = ((DateFieldMapper.DateFieldType) mapper).rangeQuery(from, to, includeLower, includeUpper, - timeZone, getForceDateParser(), context); - } else if (mapper instanceof RangeFieldMapper.RangeFieldType) { - DateMathParser forcedDateParser = null; - if (mapper.typeName() == RangeFieldMapper.RangeType.DATE.name && this.format != null) { - forcedDateParser = new DateMathParser(this.format); - } - query = ((RangeFieldMapper.RangeFieldType) mapper).rangeQuery(from, to, includeLower, includeUpper, + DateMathParser forcedDateParser = getForceDateParser(); + query = mapper.rangeQuery( + from, to, includeLower, includeUpper, relation, timeZone, forcedDateParser, context); - } else { - if (timeZone != null) { - throw new QueryShardException(context, "[range] time_zone can not be applied to non date field [" - + fieldName + "]"); - } - //LUCENE 4 UPGRADE Mapper#rangeQuery should use bytesref as well? 
- query = mapper.rangeQuery(from, to, includeLower, includeUpper, context); - } } else { if (timeZone != null) { throw new QueryShardException(context, "[range] time_zone can not be applied to non unmapped field [" @@ -530,7 +514,9 @@ protected Query doToQuery(QueryShardContext context) throws IOException { } if (query == null) { - query = new TermRangeQuery(this.fieldName, BytesRefs.toBytesRef(from), BytesRefs.toBytesRef(to), includeLower, includeUpper); + query = new TermRangeQuery(this.fieldName, + BytesRefs.toBytesRef(from), BytesRefs.toBytesRef(to), + includeLower, includeUpper); } return query; } diff --git a/core/src/main/java/org/elasticsearch/index/reindex/WorkerBulkByScrollTaskState.java b/core/src/main/java/org/elasticsearch/index/reindex/WorkerBulkByScrollTaskState.java index 7631eaac65fc2..3a96259d04a8e 100644 --- a/core/src/main/java/org/elasticsearch/index/reindex/WorkerBulkByScrollTaskState.java +++ b/core/src/main/java/org/elasticsearch/index/reindex/WorkerBulkByScrollTaskState.java @@ -34,6 +34,7 @@ import java.util.concurrent.atomic.AtomicReference; import static java.lang.Math.max; +import static java.lang.Math.min; import static java.lang.Math.round; import static org.elasticsearch.common.unit.TimeValue.timeValueNanos; @@ -44,6 +45,11 @@ public class WorkerBulkByScrollTaskState implements SuccessfullyProcessed { private static final Logger logger = Loggers.getLogger(WorkerBulkByScrollTaskState.class); + /** + * Maximum wait time allowed for throttling. 
+ */ + private static final TimeValue MAX_THROTTLE_WAIT_TIME = TimeValue.timeValueHours(1); + private final BulkByScrollTask task; /** @@ -189,7 +195,8 @@ public void delayPrepareBulkRequest(ThreadPool threadPool, TimeValue lastBatchSt public TimeValue throttleWaitTime(TimeValue lastBatchStartTime, TimeValue now, int lastBatchSize) { long earliestNextBatchStartTime = now.nanos() + (long) perfectlyThrottledBatchTime(lastBatchSize); - return timeValueNanos(max(0, earliestNextBatchStartTime - System.nanoTime())); + long waitTime = min(MAX_THROTTLE_WAIT_TIME.nanos(), max(0, earliestNextBatchStartTime - System.nanoTime())); + return timeValueNanos(waitTime); } /** diff --git a/core/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java b/core/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java index bc1a62f693930..e6846543b8cac 100644 --- a/core/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java +++ b/core/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java @@ -124,6 +124,7 @@ public static Map resolveMappingFields(QueryShardContext context, !multiField, !allField, fieldSuffix); resolvedFields.putAll(fieldMap); } + checkForTooManyFields(resolvedFields); return resolvedFields; } @@ -184,6 +185,13 @@ public static Map resolveMappingField(QueryShardContext context, } fields.put(fieldName, weight); } + checkForTooManyFields(fields); return fields; } + + private static void checkForTooManyFields(Map fields) { + if (fields.size() > 1024) { + throw new IllegalArgumentException("field expansion matches too many fields, limit: 1024, got: " + fields.size()); + } + } } diff --git a/core/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java b/core/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java index 351ab48484aea..339aeebf0a0ec 100644 --- a/core/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java +++ 
b/core/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java @@ -46,11 +46,10 @@ import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.unit.Fuzziness; -import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.FieldNamesFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.StringFieldType; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.StringFieldType; import org.elasticsearch.index.query.ExistsQueryBuilder; import org.elasticsearch.index.query.MultiMatchQueryBuilder; import org.elasticsearch.index.query.QueryShardContext; @@ -394,14 +393,8 @@ private Query getRangeQuerySingle(String field, String part1, String part2, Analyzer normalizer = forceAnalyzer == null ? queryBuilder.context.getSearchAnalyzer(currentFieldType) : forceAnalyzer; BytesRef part1Binary = part1 == null ? null : normalizer.normalize(field, part1); BytesRef part2Binary = part2 == null ? 
null : normalizer.normalize(field, part2); - Query rangeQuery; - if (currentFieldType instanceof DateFieldMapper.DateFieldType && timeZone != null) { - DateFieldMapper.DateFieldType dateFieldType = (DateFieldMapper.DateFieldType) this.currentFieldType; - rangeQuery = dateFieldType.rangeQuery(part1Binary, part2Binary, - startInclusive, endInclusive, timeZone, null, context); - } else { - rangeQuery = currentFieldType.rangeQuery(part1Binary, part2Binary, startInclusive, endInclusive, context); - } + Query rangeQuery = currentFieldType.rangeQuery(part1Binary, part2Binary, + startInclusive, endInclusive, null, timeZone, null, context); return rangeQuery; } catch (RuntimeException e) { if (lenient) { diff --git a/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointTracker.java b/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointTracker.java index 447815cf9afa0..4df58bcab4459 100644 --- a/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointTracker.java +++ b/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointTracker.java @@ -195,7 +195,7 @@ public int hashCode() { private boolean invariant() { // local checkpoints only set during primary mode assert primaryMode || localCheckpoints.values().stream() - .allMatch(lcps -> lcps.localCheckpoint == SequenceNumbersService.UNASSIGNED_SEQ_NO || + .allMatch(lcps -> lcps.localCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO || lcps.localCheckpoint == SequenceNumbersService.PRE_60_NODE_LOCAL_CHECKPOINT); // relocation handoff can only occur in primary mode @@ -242,15 +242,15 @@ private boolean invariant() { /** * Initialize the global checkpoint service. The specified global checkpoint should be set to the last known global checkpoint, or - * {@link SequenceNumbersService#UNASSIGNED_SEQ_NO}. + * {@link SequenceNumbers#UNASSIGNED_SEQ_NO}. 
* * @param shardId the shard ID * @param indexSettings the index settings - * @param globalCheckpoint the last known global checkpoint for this shard, or {@link SequenceNumbersService#UNASSIGNED_SEQ_NO} + * @param globalCheckpoint the last known global checkpoint for this shard, or {@link SequenceNumbers#UNASSIGNED_SEQ_NO} */ GlobalCheckpointTracker(final ShardId shardId, final IndexSettings indexSettings, final long globalCheckpoint) { super(shardId, indexSettings); - assert globalCheckpoint >= SequenceNumbersService.UNASSIGNED_SEQ_NO : "illegal initial global checkpoint: " + globalCheckpoint; + assert globalCheckpoint >= SequenceNumbers.UNASSIGNED_SEQ_NO : "illegal initial global checkpoint: " + globalCheckpoint; this.primaryMode = false; this.handoffInProgress = false; this.appliedClusterStateVersion = -1L; @@ -314,9 +314,9 @@ public synchronized void activatePrimaryMode(final String allocationId, final lo assert invariant(); assert primaryMode == false; assert localCheckpoints.get(allocationId) != null && localCheckpoints.get(allocationId).inSync && - localCheckpoints.get(allocationId).localCheckpoint == SequenceNumbersService.UNASSIGNED_SEQ_NO : + localCheckpoints.get(allocationId).localCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO : "expected " + allocationId + " to have initialized entry in " + localCheckpoints + " when activating primary"; - assert localCheckpoint >= SequenceNumbersService.NO_OPS_PERFORMED; + assert localCheckpoint >= SequenceNumbers.NO_OPS_PERFORMED; primaryMode = true; updateLocalCheckpoint(allocationId, localCheckpoints.get(allocationId), localCheckpoint); updateGlobalCheckpointOnPrimary(); @@ -354,19 +354,19 @@ public synchronized void updateFromMaster(final long applyingClusterStateVersion assert inSync == false : "update from master in primary mode has " + initializingId + " as in-sync but it does not exist locally"; final long localCheckpoint = pre60AllocationIds.contains(initializingId) ? 
- SequenceNumbersService.PRE_60_NODE_LOCAL_CHECKPOINT : SequenceNumbersService.UNASSIGNED_SEQ_NO; + SequenceNumbersService.PRE_60_NODE_LOCAL_CHECKPOINT : SequenceNumbers.UNASSIGNED_SEQ_NO; localCheckpoints.put(initializingId, new LocalCheckpointState(localCheckpoint, inSync)); } } } else { for (String initializingId : initializingAllocationIds) { final long localCheckpoint = pre60AllocationIds.contains(initializingId) ? - SequenceNumbersService.PRE_60_NODE_LOCAL_CHECKPOINT : SequenceNumbersService.UNASSIGNED_SEQ_NO; + SequenceNumbersService.PRE_60_NODE_LOCAL_CHECKPOINT : SequenceNumbers.UNASSIGNED_SEQ_NO; localCheckpoints.put(initializingId, new LocalCheckpointState(localCheckpoint, false)); } for (String inSyncId : inSyncAllocationIds) { final long localCheckpoint = pre60AllocationIds.contains(inSyncId) ? - SequenceNumbersService.PRE_60_NODE_LOCAL_CHECKPOINT : SequenceNumbersService.UNASSIGNED_SEQ_NO; + SequenceNumbersService.PRE_60_NODE_LOCAL_CHECKPOINT : SequenceNumbers.UNASSIGNED_SEQ_NO; localCheckpoints.put(inSyncId, new LocalCheckpointState(localCheckpoint, true)); } } @@ -413,7 +413,7 @@ public synchronized void markAllocationIdAsInSync(final String allocationId, fin // can happen if replica was removed from cluster but recovery process is unaware of it yet throw new IllegalStateException("no local checkpoint tracking information available for " + allocationId); } - assert localCheckpoint >= SequenceNumbersService.NO_OPS_PERFORMED : + assert localCheckpoint >= SequenceNumbers.NO_OPS_PERFORMED : "expected known local checkpoint for " + allocationId + " but was " + localCheckpoint; assert pendingInSync.contains(allocationId) == false : "shard copy " + allocationId + " is already marked as pending in-sync"; updateLocalCheckpoint(allocationId, lcps, localCheckpoint); @@ -451,7 +451,7 @@ private boolean updateLocalCheckpoint(String allocationId, LocalCheckpointState localCheckpoint == SequenceNumbersService.PRE_60_NODE_LOCAL_CHECKPOINT : "pre-6.0 shard copy " + 
allocationId + " unexpected to send valid local checkpoint " + localCheckpoint; // a local checkpoint for a shard copy should be a valid sequence number or the pre-6.0 sequence number indicator - assert localCheckpoint != SequenceNumbersService.UNASSIGNED_SEQ_NO : + assert localCheckpoint != SequenceNumbers.UNASSIGNED_SEQ_NO : "invalid local checkpoint for shard copy [" + allocationId + "]"; if (localCheckpoint > lcps.localCheckpoint) { logger.trace("updated local checkpoint of [{}] from [{}] to [{}]", allocationId, lcps.localCheckpoint, localCheckpoint); @@ -508,7 +508,7 @@ private static long computeGlobalCheckpoint(final Set pendingInSync, fin } for (final LocalCheckpointState lcps : localCheckpoints) { if (lcps.inSync) { - if (lcps.localCheckpoint == SequenceNumbersService.UNASSIGNED_SEQ_NO) { + if (lcps.localCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO) { // unassigned in-sync replica return fallback; } else if (lcps.localCheckpoint == SequenceNumbersService.PRE_60_NODE_LOCAL_CHECKPOINT) { @@ -580,9 +580,9 @@ public synchronized void completeRelocationHandoff() { handoffInProgress = false; // forget all checkpoint information localCheckpoints.values().stream().forEach(lcps -> { - if (lcps.localCheckpoint != SequenceNumbersService.UNASSIGNED_SEQ_NO && + if (lcps.localCheckpoint != SequenceNumbers.UNASSIGNED_SEQ_NO && lcps.localCheckpoint != SequenceNumbersService.PRE_60_NODE_LOCAL_CHECKPOINT) { - lcps.localCheckpoint = SequenceNumbersService.UNASSIGNED_SEQ_NO; + lcps.localCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; } }); assert invariant(); diff --git a/core/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java b/core/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java index 9af9f00b1d120..6a7844057fd00 100644 --- a/core/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java +++ b/core/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java @@ -67,26 +67,26 @@ public class 
LocalCheckpointTracker { /** * Initialize the local checkpoint service. The {@code maxSeqNo} should be set to the last sequence number assigned, or - * {@link SequenceNumbersService#NO_OPS_PERFORMED} and {@code localCheckpoint} should be set to the last known local checkpoint, - * or {@link SequenceNumbersService#NO_OPS_PERFORMED}. + * {@link SequenceNumbers#NO_OPS_PERFORMED} and {@code localCheckpoint} should be set to the last known local checkpoint, + * or {@link SequenceNumbers#NO_OPS_PERFORMED}. * * @param indexSettings the index settings - * @param maxSeqNo the last sequence number assigned, or {@link SequenceNumbersService#NO_OPS_PERFORMED} - * @param localCheckpoint the last known local checkpoint, or {@link SequenceNumbersService#NO_OPS_PERFORMED} + * @param maxSeqNo the last sequence number assigned, or {@link SequenceNumbers#NO_OPS_PERFORMED} + * @param localCheckpoint the last known local checkpoint, or {@link SequenceNumbers#NO_OPS_PERFORMED} */ public LocalCheckpointTracker(final IndexSettings indexSettings, final long maxSeqNo, final long localCheckpoint) { - if (localCheckpoint < 0 && localCheckpoint != SequenceNumbersService.NO_OPS_PERFORMED) { + if (localCheckpoint < 0 && localCheckpoint != SequenceNumbers.NO_OPS_PERFORMED) { throw new IllegalArgumentException( - "local checkpoint must be non-negative or [" + SequenceNumbersService.NO_OPS_PERFORMED + "] " + "local checkpoint must be non-negative or [" + SequenceNumbers.NO_OPS_PERFORMED + "] " + "but was [" + localCheckpoint + "]"); } - if (maxSeqNo < 0 && maxSeqNo != SequenceNumbersService.NO_OPS_PERFORMED) { + if (maxSeqNo < 0 && maxSeqNo != SequenceNumbers.NO_OPS_PERFORMED) { throw new IllegalArgumentException( - "max seq. no. must be non-negative or [" + SequenceNumbersService.NO_OPS_PERFORMED + "] but was [" + maxSeqNo + "]"); + "max seq. no. 
must be non-negative or [" + SequenceNumbers.NO_OPS_PERFORMED + "] but was [" + maxSeqNo + "]"); } bitArraysSize = SETTINGS_BIT_ARRAYS_SIZE.get(indexSettings.getSettings()); - firstProcessedSeqNo = localCheckpoint == SequenceNumbersService.NO_OPS_PERFORMED ? 0 : localCheckpoint + 1; - nextSeqNo = maxSeqNo == SequenceNumbersService.NO_OPS_PERFORMED ? 0 : maxSeqNo + 1; + firstProcessedSeqNo = localCheckpoint == SequenceNumbers.NO_OPS_PERFORMED ? 0 : localCheckpoint + 1; + nextSeqNo = maxSeqNo == SequenceNumbers.NO_OPS_PERFORMED ? 0 : maxSeqNo + 1; checkpoint = localCheckpoint; } @@ -127,7 +127,7 @@ public synchronized void markSeqNoAsCompleted(final long seqNo) { * @param checkpoint the local checkpoint to reset this tracker to */ synchronized void resetCheckpoint(final long checkpoint) { - assert checkpoint != SequenceNumbersService.UNASSIGNED_SEQ_NO; + assert checkpoint != SequenceNumbers.UNASSIGNED_SEQ_NO; assert checkpoint <= this.checkpoint; processedSeqNo.clear(); firstProcessedSeqNo = checkpoint + 1; diff --git a/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java b/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java index 885fdfc9e6553..cf878f613a710 100644 --- a/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java +++ b/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java @@ -28,6 +28,14 @@ public class SequenceNumbers { public static final String LOCAL_CHECKPOINT_KEY = "local_checkpoint"; public static final String MAX_SEQ_NO = "max_seq_no"; + /** + * Represents an unassigned sequence number (e.g., can be used on primary operations before they are executed). + */ + public static final long UNASSIGNED_SEQ_NO = -2L; + /** + * Represents no operations have been performed on the shard. 
+ */ + public static final long NO_OPS_PERFORMED = -1L; /** * Reads the sequence number stats from the commit data (maximum sequence number and local checkpoint) and uses the specified global @@ -40,16 +48,16 @@ public class SequenceNumbers { public static SeqNoStats loadSeqNoStatsFromLuceneCommit( final long globalCheckpoint, final Iterable> commitData) { - long maxSeqNo = SequenceNumbersService.NO_OPS_PERFORMED; - long localCheckpoint = SequenceNumbersService.NO_OPS_PERFORMED; + long maxSeqNo = NO_OPS_PERFORMED; + long localCheckpoint = NO_OPS_PERFORMED; for (final Map.Entry entry : commitData) { final String key = entry.getKey(); if (key.equals(SequenceNumbers.LOCAL_CHECKPOINT_KEY)) { - assert localCheckpoint == SequenceNumbersService.NO_OPS_PERFORMED : localCheckpoint; + assert localCheckpoint == NO_OPS_PERFORMED : localCheckpoint; localCheckpoint = Long.parseLong(entry.getValue()); } else if (key.equals(SequenceNumbers.MAX_SEQ_NO)) { - assert maxSeqNo == SequenceNumbersService.NO_OPS_PERFORMED : maxSeqNo; + assert maxSeqNo == NO_OPS_PERFORMED : maxSeqNo; maxSeqNo = Long.parseLong(entry.getValue()); } } @@ -59,22 +67,22 @@ public static SeqNoStats loadSeqNoStatsFromLuceneCommit( /** * Compute the minimum of the given current minimum sequence number and the specified sequence number, accounting for the fact that the - * current minimum sequence number could be {@link SequenceNumbersService#NO_OPS_PERFORMED} or - * {@link SequenceNumbersService#UNASSIGNED_SEQ_NO}. When the current minimum sequence number is not - * {@link SequenceNumbersService#NO_OPS_PERFORMED} nor {@link SequenceNumbersService#UNASSIGNED_SEQ_NO}, the specified sequence number - * must not be {@link SequenceNumbersService#UNASSIGNED_SEQ_NO}. + * current minimum sequence number could be {@link SequenceNumbers#NO_OPS_PERFORMED} or + * {@link SequenceNumbers#UNASSIGNED_SEQ_NO}. 
When the current minimum sequence number is not + * {@link SequenceNumbers#NO_OPS_PERFORMED} nor {@link SequenceNumbers#UNASSIGNED_SEQ_NO}, the specified sequence number + * must not be {@link SequenceNumbers#UNASSIGNED_SEQ_NO}. * * @param minSeqNo the current minimum sequence number * @param seqNo the specified sequence number * @return the new minimum sequence number */ public static long min(final long minSeqNo, final long seqNo) { - if (minSeqNo == SequenceNumbersService.NO_OPS_PERFORMED) { + if (minSeqNo == NO_OPS_PERFORMED) { return seqNo; - } else if (minSeqNo == SequenceNumbersService.UNASSIGNED_SEQ_NO) { + } else if (minSeqNo == UNASSIGNED_SEQ_NO) { return seqNo; } else { - if (seqNo == SequenceNumbersService.UNASSIGNED_SEQ_NO) { + if (seqNo == UNASSIGNED_SEQ_NO) { throw new IllegalArgumentException("sequence number must be assigned"); } return Math.min(minSeqNo, seqNo); @@ -83,22 +91,22 @@ public static long min(final long minSeqNo, final long seqNo) { /** * Compute the maximum of the given current maximum sequence number and the specified sequence number, accounting for the fact that the - * current maximum sequence number could be {@link SequenceNumbersService#NO_OPS_PERFORMED} or - * {@link SequenceNumbersService#UNASSIGNED_SEQ_NO}. When the current maximum sequence number is not - * {@link SequenceNumbersService#NO_OPS_PERFORMED} nor {@link SequenceNumbersService#UNASSIGNED_SEQ_NO}, the specified sequence number - * must not be {@link SequenceNumbersService#UNASSIGNED_SEQ_NO}. + * current maximum sequence number could be {@link SequenceNumbers#NO_OPS_PERFORMED} or + * {@link SequenceNumbers#UNASSIGNED_SEQ_NO}. When the current maximum sequence number is not + * {@link SequenceNumbers#NO_OPS_PERFORMED} nor {@link SequenceNumbers#UNASSIGNED_SEQ_NO}, the specified sequence number + * must not be {@link SequenceNumbers#UNASSIGNED_SEQ_NO}. 
* * @param maxSeqNo the current maximum sequence number * @param seqNo the specified sequence number * @return the new maximum sequence number */ public static long max(final long maxSeqNo, final long seqNo) { - if (maxSeqNo == SequenceNumbersService.NO_OPS_PERFORMED) { + if (maxSeqNo == NO_OPS_PERFORMED) { return seqNo; - } else if (maxSeqNo == SequenceNumbersService.UNASSIGNED_SEQ_NO) { + } else if (maxSeqNo == UNASSIGNED_SEQ_NO) { return seqNo; } else { - if (seqNo == SequenceNumbersService.UNASSIGNED_SEQ_NO) { + if (seqNo == UNASSIGNED_SEQ_NO) { throw new IllegalArgumentException("sequence number must be assigned"); } return Math.max(maxSeqNo, seqNo); diff --git a/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbersService.java b/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbersService.java index ba5ce68287e7a..44ad8db39a2a6 100644 --- a/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbersService.java +++ b/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbersService.java @@ -32,16 +32,6 @@ */ public class SequenceNumbersService extends AbstractIndexShardComponent { - /** - * Represents an unassigned sequence number (e.g., can be used on primary operations before they are executed). - */ - public static final long UNASSIGNED_SEQ_NO = -2L; - - /** - * Represents no operations have been performed on the shard. - */ - public static final long NO_OPS_PERFORMED = -1L; - /** * Represents a local checkpoint coming from a pre-6.0 node */ @@ -52,15 +42,15 @@ public class SequenceNumbersService extends AbstractIndexShardComponent { /** * Initialize the sequence number service. 
The {@code maxSeqNo} should be set to the last sequence number assigned by this shard, or - * {@link SequenceNumbersService#NO_OPS_PERFORMED}, {@code localCheckpoint} should be set to the last known local checkpoint for this - * shard, or {@link SequenceNumbersService#NO_OPS_PERFORMED}, and {@code globalCheckpoint} should be set to the last known global - * checkpoint for this shard, or {@link SequenceNumbersService#UNASSIGNED_SEQ_NO}. + * {@link SequenceNumbers#NO_OPS_PERFORMED}, {@code localCheckpoint} should be set to the last known local checkpoint for this + * shard, or {@link SequenceNumbers#NO_OPS_PERFORMED}, and {@code globalCheckpoint} should be set to the last known global + * checkpoint for this shard, or {@link SequenceNumbers#UNASSIGNED_SEQ_NO}. * * @param shardId the shard this service is providing tracking local checkpoints for * @param indexSettings the index settings - * @param maxSeqNo the last sequence number assigned by this shard, or {@link SequenceNumbersService#NO_OPS_PERFORMED} - * @param localCheckpoint the last known local checkpoint for this shard, or {@link SequenceNumbersService#NO_OPS_PERFORMED} - * @param globalCheckpoint the last known global checkpoint for this shard, or {@link SequenceNumbersService#UNASSIGNED_SEQ_NO} + * @param maxSeqNo the last sequence number assigned by this shard, or {@link SequenceNumbers#NO_OPS_PERFORMED} + * @param localCheckpoint the last known local checkpoint for this shard, or {@link SequenceNumbers#NO_OPS_PERFORMED} + * @param globalCheckpoint the last known global checkpoint for this shard, or {@link SequenceNumbers#UNASSIGNED_SEQ_NO} */ public SequenceNumbersService( final ShardId shardId, diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index af2c45e42cd13..22d6ba20be2cd 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ 
b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -104,6 +104,7 @@ import org.elasticsearch.index.search.stats.ShardSearchStats; import org.elasticsearch.index.seqno.GlobalCheckpointTracker; import org.elasticsearch.index.seqno.SeqNoStats; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.seqno.SequenceNumbersService; import org.elasticsearch.index.shard.PrimaryReplicaSyncer.ResyncTask; import org.elasticsearch.index.similarity.SimilarityService; @@ -631,7 +632,7 @@ private IndexShardState changeState(IndexShardState newState, String reason) { public Engine.IndexResult applyIndexOperationOnPrimary(long version, VersionType versionType, SourceToParse sourceToParse, long autoGeneratedTimestamp, boolean isRetry, Consumer onMappingUpdate) throws IOException { - return applyIndexOperation(SequenceNumbersService.UNASSIGNED_SEQ_NO, primaryTerm, version, versionType, autoGeneratedTimestamp, + return applyIndexOperation(SequenceNumbers.UNASSIGNED_SEQ_NO, primaryTerm, version, versionType, autoGeneratedTimestamp, isRetry, Engine.Operation.Origin.PRIMARY, sourceToParse, onMappingUpdate); } @@ -728,7 +729,7 @@ private Engine.NoOpResult noOp(Engine engine, Engine.NoOp noOp) { public Engine.DeleteResult applyDeleteOperationOnPrimary(long version, String type, String id, VersionType versionType, Consumer onMappingUpdate) throws IOException { - return applyDeleteOperation(SequenceNumbersService.UNASSIGNED_SEQ_NO, primaryTerm, version, type, id, versionType, + return applyDeleteOperation(SequenceNumbers.UNASSIGNED_SEQ_NO, primaryTerm, version, type, id, versionType, Engine.Operation.Origin.PRIMARY, onMappingUpdate); } @@ -2134,8 +2135,8 @@ public void acquireReplicaOperationPermit(final long operationPrimaryTerm, final updateGlobalCheckpointOnReplica(globalCheckpoint, "primary term transition"); final long currentGlobalCheckpoint = getGlobalCheckpoint(); final long localCheckpoint; - if (currentGlobalCheckpoint == 
SequenceNumbersService.UNASSIGNED_SEQ_NO) { - localCheckpoint = SequenceNumbersService.NO_OPS_PERFORMED; + if (currentGlobalCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO) { + localCheckpoint = SequenceNumbers.NO_OPS_PERFORMED; } else { localCheckpoint = currentGlobalCheckpoint; } diff --git a/core/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java b/core/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java index 3716dcaff0fc7..08d64cb82bc72 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java +++ b/core/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java @@ -35,7 +35,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.seqno.SequenceNumbersService; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; @@ -231,7 +231,7 @@ protected void doRun() throws Exception { while ((operation = snapshot.next()) != null) { final long seqNo = operation.seqNo(); if (startingSeqNo >= 0 && - (seqNo == SequenceNumbersService.UNASSIGNED_SEQ_NO || seqNo < startingSeqNo)) { + (seqNo == SequenceNumbers.UNASSIGNED_SEQ_NO || seqNo < startingSeqNo)) { totalSkippedOps.incrementAndGet(); continue; } diff --git a/core/src/main/java/org/elasticsearch/index/translog/Checkpoint.java b/core/src/main/java/org/elasticsearch/index/translog/Checkpoint.java index 547d5aa499fb3..32aef840b6f37 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/Checkpoint.java +++ b/core/src/main/java/org/elasticsearch/index/translog/Checkpoint.java @@ -28,7 +28,7 @@ import org.apache.lucene.store.OutputStreamIndexOutput; import org.apache.lucene.store.SimpleFSDirectory; import org.elasticsearch.common.io.Channels; -import 
org.elasticsearch.index.seqno.SequenceNumbersService; +import org.elasticsearch.index.seqno.SequenceNumbers; import java.io.ByteArrayOutputStream; import java.io.IOException; @@ -105,8 +105,8 @@ private void write(DataOutput out) throws IOException { static Checkpoint emptyTranslogCheckpoint(final long offset, final long generation, final long globalCheckpoint, long minTranslogGeneration) { - final long minSeqNo = SequenceNumbersService.NO_OPS_PERFORMED; - final long maxSeqNo = SequenceNumbersService.NO_OPS_PERFORMED; + final long minSeqNo = SequenceNumbers.NO_OPS_PERFORMED; + final long maxSeqNo = SequenceNumbers.NO_OPS_PERFORMED; return new Checkpoint(offset, 0, generation, minSeqNo, maxSeqNo, globalCheckpoint, minTranslogGeneration); } @@ -116,9 +116,9 @@ static Checkpoint readCheckpointV6_0_0(final DataInput in) throws IOException { // reads a checksummed checkpoint introduced in ES 5.0.0 static Checkpoint readCheckpointV5_0_0(final DataInput in) throws IOException { - final long minSeqNo = SequenceNumbersService.NO_OPS_PERFORMED; - final long maxSeqNo = SequenceNumbersService.NO_OPS_PERFORMED; - final long globalCheckpoint = SequenceNumbersService.UNASSIGNED_SEQ_NO; + final long minSeqNo = SequenceNumbers.NO_OPS_PERFORMED; + final long maxSeqNo = SequenceNumbers.NO_OPS_PERFORMED; + final long globalCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; final long minTranslogGeneration = -1L; return new Checkpoint(in.readLong(), in.readInt(), in.readLong(), minSeqNo, maxSeqNo, globalCheckpoint, minTranslogGeneration); } diff --git a/core/src/main/java/org/elasticsearch/index/translog/Translog.java b/core/src/main/java/org/elasticsearch/index/translog/Translog.java index 3664b76807818..20c428960f747 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/core/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -43,7 +43,7 @@ import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; import 
org.elasticsearch.index.mapper.Uid; -import org.elasticsearch.index.seqno.SequenceNumbersService; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.AbstractIndexShardComponent; import org.elasticsearch.index.shard.IndexShardComponent; @@ -634,7 +634,7 @@ private Stream readersAboveMinSeqNo(long minSeqNo) return Stream.concat(readers.stream(), Stream.of(current)) .filter(reader -> { final long maxSeqNo = reader.getCheckpoint().maxSeqNo; - return maxSeqNo == SequenceNumbersService.UNASSIGNED_SEQ_NO || maxSeqNo >= minSeqNo; + return maxSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO || maxSeqNo >= minSeqNo; }); } @@ -978,7 +978,7 @@ public Index(StreamInput in) throws IOException { seqNo = in.readLong(); primaryTerm = in.readLong(); } else { - seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO; + seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; primaryTerm = 0; } } @@ -1182,7 +1182,7 @@ public Delete(StreamInput in) throws IOException { seqNo = in.readLong(); primaryTerm = in.readLong(); } else { - seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO; + seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; primaryTerm = 0; } } @@ -1329,7 +1329,7 @@ public String reason() { } public NoOp(final long seqNo, final long primaryTerm, final String reason) { - assert seqNo > SequenceNumbersService.NO_OPS_PERFORMED; + assert seqNo > SequenceNumbers.NO_OPS_PERFORMED; assert primaryTerm >= 0; assert reason != null; this.seqNo = seqNo; diff --git a/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java b/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java index 9c95471e60e82..c12299feaa596 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java +++ b/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java @@ -31,7 +31,6 @@ import org.elasticsearch.common.io.Channels; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.seqno.SequenceNumbers; -import 
org.elasticsearch.index.seqno.SequenceNumbersService; import org.elasticsearch.index.shard.ShardId; import java.io.BufferedOutputStream; @@ -97,9 +96,9 @@ private TranslogWriter( this.outputStream = new BufferedChannelOutputStream(java.nio.channels.Channels.newOutputStream(channel), bufferSize.bytesAsInt()); this.lastSyncedCheckpoint = initialCheckpoint; this.totalOffset = initialCheckpoint.offset; - assert initialCheckpoint.minSeqNo == SequenceNumbersService.NO_OPS_PERFORMED : initialCheckpoint.minSeqNo; + assert initialCheckpoint.minSeqNo == SequenceNumbers.NO_OPS_PERFORMED : initialCheckpoint.minSeqNo; this.minSeqNo = initialCheckpoint.minSeqNo; - assert initialCheckpoint.maxSeqNo == SequenceNumbersService.NO_OPS_PERFORMED : initialCheckpoint.maxSeqNo; + assert initialCheckpoint.maxSeqNo == SequenceNumbers.NO_OPS_PERFORMED : initialCheckpoint.maxSeqNo; this.maxSeqNo = initialCheckpoint.maxSeqNo; this.globalCheckpointSupplier = globalCheckpointSupplier; this.seenSequenceNumbers = Assertions.ENABLED ? 
new HashMap<>() : null; @@ -193,10 +192,10 @@ public synchronized Translog.Location add(final BytesReference data, final long } totalOffset += data.length(); - if (minSeqNo == SequenceNumbersService.NO_OPS_PERFORMED) { + if (minSeqNo == SequenceNumbers.NO_OPS_PERFORMED) { assert operationCounter == 0; } - if (maxSeqNo == SequenceNumbersService.NO_OPS_PERFORMED) { + if (maxSeqNo == SequenceNumbers.NO_OPS_PERFORMED) { assert operationCounter == 0; } @@ -211,7 +210,7 @@ public synchronized Translog.Location add(final BytesReference data, final long } private synchronized boolean assertNoSeqNumberConflict(long seqNo, BytesReference data) throws IOException { - if (seqNo == SequenceNumbersService.UNASSIGNED_SEQ_NO) { + if (seqNo == SequenceNumbers.UNASSIGNED_SEQ_NO) { // nothing to do } else if (seenSequenceNumbers.containsKey(seqNo)) { final Tuple previous = seenSequenceNumbers.get(seqNo); diff --git a/core/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java b/core/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java index 408691692cacf..325f840bd7c30 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java +++ b/core/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java @@ -40,7 +40,7 @@ import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexNotFoundException; -import org.elasticsearch.index.seqno.SequenceNumbersService; +import org.elasticsearch.index.seqno.SequenceNumbers; import java.io.IOException; import java.nio.channels.Channels; @@ -169,7 +169,7 @@ protected void execute(Terminal terminal, OptionSet options, Environment env) th /** Write a checkpoint file to the given location with the given generation */ public static void writeEmptyCheckpoint(Path filename, int translogLength, long translogGeneration) throws IOException { Checkpoint emptyCheckpoint = 
Checkpoint.emptyTranslogCheckpoint(translogLength, translogGeneration, - SequenceNumbersService.UNASSIGNED_SEQ_NO, translogGeneration); + SequenceNumbers.UNASSIGNED_SEQ_NO, translogGeneration); Checkpoint.write(FileChannel::open, filename, emptyCheckpoint, StandardOpenOption.WRITE, StandardOpenOption.READ, StandardOpenOption.CREATE_NEW); // fsync with metadata here to make sure. diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index dcb6f5759d120..65a8a0d0f6e0b 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -42,7 +42,7 @@ import org.elasticsearch.index.engine.RecoveryEngineException; import org.elasticsearch.index.mapper.MapperException; import org.elasticsearch.index.seqno.SeqNoStats; -import org.elasticsearch.index.seqno.SequenceNumbersService; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IllegalIndexShardStateException; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; @@ -319,10 +319,10 @@ private StartRecoveryRequest getStartRecoveryRequest(final RecoveryTarget recove if (metadataSnapshot.size() > 0) { startingSeqNo = getStartingSeqNo(recoveryTarget); } else { - startingSeqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO; + startingSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; } - if (startingSeqNo == SequenceNumbersService.UNASSIGNED_SEQ_NO) { + if (startingSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO) { logger.trace("{} preparing for file-based recovery from [{}]", recoveryTarget.shardId(), recoveryTarget.sourceNode()); } else { logger.trace( @@ -348,7 +348,7 @@ private StartRecoveryRequest getStartRecoveryRequest(final RecoveryTarget recove * Get the starting sequence number for a 
sequence-number-based request. * * @param recoveryTarget the target of the recovery - * @return the starting sequence number or {@link SequenceNumbersService#UNASSIGNED_SEQ_NO} if obtaining the starting sequence number + * @return the starting sequence number or {@link SequenceNumbers#UNASSIGNED_SEQ_NO} if obtaining the starting sequence number * failed */ public static long getStartingSeqNo(final RecoveryTarget recoveryTarget) { @@ -364,7 +364,7 @@ public static long getStartingSeqNo(final RecoveryTarget recoveryTarget) { */ return seqNoStats.getLocalCheckpoint() + 1; } else { - return SequenceNumbersService.UNASSIGNED_SEQ_NO; + return SequenceNumbers.UNASSIGNED_SEQ_NO; } } catch (final IOException e) { /* @@ -372,7 +372,7 @@ public static long getStartingSeqNo(final RecoveryTarget recoveryTarget) { * translog on the recovery target is opened, the recovery enters a retry loop seeing now that the index files are on disk and * proceeds to attempt a sequence-number-based recovery. */ - return SequenceNumbersService.UNASSIGNED_SEQ_NO; + return SequenceNumbers.UNASSIGNED_SEQ_NO; } } diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java index 2bdf45fede28c..6337467e78330 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java @@ -22,7 +22,7 @@ import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.index.seqno.SequenceNumbersService; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.transport.TransportRequest; @@ -63,7 +63,7 @@ public void readFrom(StreamInput in) throws IOException { if 
(in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) { globalCheckpoint = in.readZLong(); } else { - globalCheckpoint = SequenceNumbersService.UNASSIGNED_SEQ_NO; + globalCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; } } diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index a5c1d9cf371e2..73ab31975684c 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -47,7 +47,7 @@ import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.RecoveryEngineException; import org.elasticsearch.index.seqno.LocalCheckpointTracker; -import org.elasticsearch.index.seqno.SequenceNumbersService; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardClosedException; import org.elasticsearch.index.shard.IndexShardRelocatedException; @@ -147,7 +147,7 @@ public RecoveryResponse recoverToTarget() throws IOException { final Translog translog = shard.getTranslog(); final long startingSeqNo; - boolean isSequenceNumberBasedRecoveryPossible = request.startingSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO && + boolean isSequenceNumberBasedRecoveryPossible = request.startingSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && isTranslogReadyForSequenceNumberBasedRecovery(); if (isSequenceNumberBasedRecoveryPossible) { @@ -162,7 +162,7 @@ public RecoveryResponse recoverToTarget() throws IOException { } // we set this to unassigned to create a translog roughly according to the retention policy // on the target - startingSeqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO; + startingSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; try { phase1(phase1Snapshot.getIndexCommit(), translog::totalOperations); @@ -239,16 +239,13 @@ boolean 
isTranslogReadyForSequenceNumberBasedRecovery() throws IOException { try (Translog.Snapshot snapshot = shard.getTranslog().newSnapshotFromMinSeqNo(startingSeqNo)) { Translog.Operation operation; while ((operation = snapshot.next()) != null) { - if (operation.seqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) { + if (operation.seqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) { tracker.markSeqNoAsCompleted(operation.seqNo()); } } } return tracker.getCheckpoint() >= endingSeqNo; } else { - // norelease this can currently happen if a snapshot restore rolls the primary back to a previous commit point; in this - // situation the local checkpoint on the replica can be far in advance of the maximum sequence number on the primary violating - // all assumptions regarding local and global checkpoints return false; } } @@ -427,7 +424,7 @@ void prepareTargetForTranslog(final int totalTranslogOps) throws IOException { * point-in-time view of the translog). It then sends each translog operation to the target node so it can be replayed into the new * shard. * - * @param startingSeqNo the sequence number to start recovery from, or {@link SequenceNumbersService#UNASSIGNED_SEQ_NO} if all + * @param startingSeqNo the sequence number to start recovery from, or {@link SequenceNumbers#UNASSIGNED_SEQ_NO} if all * ops should be sent * @param snapshot a snapshot of the translog * @@ -513,7 +510,7 @@ protected SendSnapshotResult sendSnapshot(final long startingSeqNo, final Transl long size = 0; int skippedOps = 0; int totalSentOps = 0; - final AtomicLong targetLocalCheckpoint = new AtomicLong(SequenceNumbersService.UNASSIGNED_SEQ_NO); + final AtomicLong targetLocalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO); final List operations = new ArrayList<>(); final int expectedTotalOps = snapshot.totalOperations(); @@ -536,7 +533,7 @@ protected SendSnapshotResult sendSnapshot(final long startingSeqNo, final Transl * any ops before the starting sequence number. 
*/ final long seqNo = operation.seqNo(); - if (startingSeqNo >= 0 && (seqNo == SequenceNumbersService.UNASSIGNED_SEQ_NO || seqNo < startingSeqNo)) { + if (startingSeqNo >= 0 && (seqNo == SequenceNumbers.UNASSIGNED_SEQ_NO || seqNo < startingSeqNo)) { skippedOps++; continue; } diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsResponse.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsResponse.java index 731eb28ed92c7..530b8b67415d3 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsResponse.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsResponse.java @@ -22,7 +22,7 @@ import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.index.seqno.SequenceNumbersService; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.transport.FutureTransportResponseHandler; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportResponseHandler; @@ -56,7 +56,7 @@ public void readFrom(final StreamInput in) throws IOException { localCheckpoint = in.readZLong(); } else { - localCheckpoint = SequenceNumbersService.UNASSIGNED_SEQ_NO; + localCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; } } diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java b/core/src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java index 825fa8306bada..57ea19ff298d9 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java @@ -23,7 +23,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; 
-import org.elasticsearch.index.seqno.SequenceNumbersService; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.Store; import org.elasticsearch.transport.TransportRequest; @@ -122,7 +122,7 @@ public void readFrom(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) { startingSeqNo = in.readLong(); } else { - startingSeqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO; + startingSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; } } diff --git a/core/src/main/java/org/elasticsearch/plugins/DiscoveryPlugin.java b/core/src/main/java/org/elasticsearch/plugins/DiscoveryPlugin.java index c3af5593cd7c4..912bcdc9d852a 100644 --- a/core/src/main/java/org/elasticsearch/plugins/DiscoveryPlugin.java +++ b/core/src/main/java/org/elasticsearch/plugins/DiscoveryPlugin.java @@ -19,10 +19,14 @@ package org.elasticsearch.plugins; +import java.util.Collection; import java.util.Collections; import java.util.Map; +import java.util.function.BiConsumer; import java.util.function.Supplier; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterApplier; import org.elasticsearch.cluster.service.MasterService; @@ -106,4 +110,11 @@ default Map> getZenHostsProviders(Transpo NetworkService networkService) { return Collections.emptyMap(); } + + /** + * Returns a consumer that validates the initial join cluster state. The validator, unless null, is called exactly once per + * join attempt but might be called multiple times during the lifetime of a node. Validators are expected to throw an + * {@link IllegalStateException} if the node and the cluster-state are incompatible.
+ */ + default BiConsumer getJoinValidator() { return null; } } diff --git a/core/src/main/java/org/elasticsearch/search/DefaultSearchContext.java b/core/src/main/java/org/elasticsearch/search/DefaultSearchContext.java index 44940c403c71a..2c707a2b9a28c 100644 --- a/core/src/main/java/org/elasticsearch/search/DefaultSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/DefaultSearchContext.java @@ -212,6 +212,9 @@ public void preProcess(boolean rewrite) { + IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey() + "] index level setting."); } if (rescore != null) { + if (sort != null) { + throw new QueryPhaseExecutionException(this, "Cannot use [sort] option in conjunction with [rescore]."); + } int maxWindow = indexService.getIndexSettings().getMaxRescoreWindow(); for (RescoreContext rescoreContext: rescore) { if (rescoreContext.getWindowSize() > maxWindow) { diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java index f39a75f3ae046..4e7c070bfeef8 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchService.java +++ b/core/src/main/java/org/elasticsearch/search/SearchService.java @@ -43,6 +43,7 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.MergeSchedulerConfig; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.query.InnerHitContextBuilder; import org.elasticsearch.index.query.MatchAllQueryBuilder; @@ -82,6 +83,7 @@ import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.profile.Profilers; import org.elasticsearch.search.query.QueryPhase; +import org.elasticsearch.search.query.QueryPhaseExecutionException; import org.elasticsearch.search.query.QuerySearchRequest; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.query.ScrollQuerySearchResult; @@ 
-106,6 +108,7 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.function.LongSupplier; +import static org.elasticsearch.common.unit.TimeValue.timeValueHours; import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes; @@ -113,7 +116,9 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv // we can have 5 minutes here, since we make sure to clean with search requests and when shard/index closes public static final Setting DEFAULT_KEEPALIVE_SETTING = - Setting.positiveTimeSetting("search.default_keep_alive", timeValueMinutes(5), Property.NodeScope); + Setting.positiveTimeSetting("search.default_keep_alive", timeValueMinutes(5), Property.NodeScope, Property.Dynamic); + public static final Setting MAX_KEEPALIVE_SETTING = + Setting.positiveTimeSetting("search.max_keep_alive", timeValueHours(24), Property.NodeScope, Property.Dynamic); public static final Setting KEEPALIVE_INTERVAL_SETTING = Setting.positiveTimeSetting("search.keep_alive_interval", timeValueMinutes(1), Property.NodeScope); /** @@ -147,7 +152,9 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv private final FetchPhase fetchPhase; - private final long defaultKeepAlive; + private volatile long defaultKeepAlive; + + private volatile long maxKeepAlive; private volatile TimeValue defaultSearchTimeout; @@ -173,7 +180,10 @@ public SearchService(ClusterService clusterService, IndicesService indicesServic this.fetchPhase = fetchPhase; TimeValue keepAliveInterval = KEEPALIVE_INTERVAL_SETTING.get(settings); - this.defaultKeepAlive = DEFAULT_KEEPALIVE_SETTING.get(settings).millis(); + setKeepAlives(DEFAULT_KEEPALIVE_SETTING.get(settings), MAX_KEEPALIVE_SETTING.get(settings)); + + clusterService.getClusterSettings().addSettingsUpdateConsumer(DEFAULT_KEEPALIVE_SETTING, MAX_KEEPALIVE_SETTING, + this::setKeepAlives, this::validateKeepAlives); 
this.keepAliveReaper = threadPool.scheduleWithFixedDelay(new Reaper(), keepAliveInterval, Names.SAME); @@ -184,6 +194,20 @@ public SearchService(ClusterService clusterService, IndicesService indicesServic clusterService.getClusterSettings().addSettingsUpdateConsumer(LOW_LEVEL_CANCELLATION_SETTING, this::setLowLevelCancellation); } + private void validateKeepAlives(TimeValue defaultKeepAlive, TimeValue maxKeepAlive) { + if (defaultKeepAlive.millis() > maxKeepAlive.millis()) { + throw new IllegalArgumentException("Default keep alive setting for scroll [" + DEFAULT_KEEPALIVE_SETTING.getKey() + "]" + + " should be smaller than max keep alive [" + MAX_KEEPALIVE_SETTING.getKey() + "], " + + "was (" + defaultKeepAlive.format() + " > " + maxKeepAlive.format() + ")"); + } + } + + private void setKeepAlives(TimeValue defaultKeepAlive, TimeValue maxKeepAlive) { + validateKeepAlives(defaultKeepAlive, maxKeepAlive); + this.defaultKeepAlive = defaultKeepAlive.millis(); + this.maxKeepAlive = maxKeepAlive.millis(); + } + private void setDefaultSearchTimeout(TimeValue defaultSearchTimeout) { this.defaultSearchTimeout = defaultSearchTimeout; } @@ -547,7 +571,7 @@ final SearchContext createContext(ShardSearchRequest request, @Nullable Engine.S if (request.scroll() != null && request.scroll().keepAlive() != null) { keepAlive = request.scroll().keepAlive().millis(); } - context.keepAlive(keepAlive); + contextScrollKeepAlive(context, keepAlive); context.lowLevelCancellation(lowLevelCancellation); } catch (Exception e) { context.close(); @@ -625,6 +649,16 @@ public void freeAllScrollContexts() { } } + private void contextScrollKeepAlive(SearchContext context, long keepAlive) throws IOException { + if (keepAlive > maxKeepAlive) { + throw new QueryPhaseExecutionException(context, + "Keep alive for scroll (" + TimeValue.timeValueMillis(keepAlive).format() + ") is too large. " + + "It must be less than (" + TimeValue.timeValueMillis(maxKeepAlive).format() + "). 
" + + "This limit can be set by changing the [" + MAX_KEEPALIVE_SETTING.getKey() + "] cluster level setting."); + } + context.keepAlive(keepAlive); + } + private void contextProcessing(SearchContext context) { // disable timeout while executing a search context.accessed(-1); @@ -847,13 +881,13 @@ private void shortcutDocIdsToLoad(SearchContext context) { context.docIdsToLoad(docIdsToLoad, 0, docIdsToLoad.length); } - private void processScroll(InternalScrollSearchRequest request, SearchContext context) { + private void processScroll(InternalScrollSearchRequest request, SearchContext context) throws IOException { // process scroll context.from(context.from() + context.size()); context.scrollContext().scroll = request.scroll(); // update the context keep alive based on the new scroll value if (request.scroll() != null && request.scroll().keepAlive() != null) { - context.keepAlive(request.scroll().keepAlive().millis()); + contextScrollKeepAlive(context, request.scroll().keepAlive().millis()); } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java index c5be9f3551b15..cede3ae9661db 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.script.Script; import org.elasticsearch.script.SearchScript; @@ -529,6 +530,17 @@ public TopHitsAggregationBuilder subAggregations(Builder subFactories) { @Override protected 
TopHitsAggregatorFactory doBuild(SearchContext context, AggregatorFactory parent, Builder subfactoriesBuilder) throws IOException { + long innerResultWindow = from() + size(); + int maxInnerResultWindow = context.mapperService().getIndexSettings().getMaxInnerResultWindow(); + if (innerResultWindow > maxInnerResultWindow) { + throw new IllegalArgumentException( + "Top hits result window is too large, the top hits aggregator [" + name + "]'s from + size must be less " + + "than or equal to: [" + maxInnerResultWindow + "] but was [" + innerResultWindow + + "]. This limit can be set by changing the [" + IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING.getKey() + + "] index level setting." + ); + } + List fields = new ArrayList<>(); if (scriptFields != null) { for (ScriptField field : scriptFields) { diff --git a/core/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java b/core/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java index 31e7d0aed3f65..46b03c61ca0b9 100644 --- a/core/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java +++ b/core/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java @@ -289,8 +289,11 @@ static TopDocsCollectorContext createTopDocsCollectorContext(SearchContext searc } else { int numDocs = Math.min(searchContext.from() + searchContext.size(), totalNumDocs); final boolean rescore = searchContext.rescore().isEmpty() == false; - for (RescoreContext rescoreContext : searchContext.rescore()) { - numDocs = Math.max(numDocs, rescoreContext.getWindowSize()); + if (rescore) { + assert searchContext.sort() == null; + for (RescoreContext rescoreContext : searchContext.rescore()) { + numDocs = Math.max(numDocs, rescoreContext.getWindowSize()); + } } return new SimpleTopDocsCollectorContext(searchContext.sort(), searchContext.searchAfter(), diff --git a/core/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java 
b/core/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java index a1d85765fa1ab..b8ce8f8118b50 100644 --- a/core/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java +++ b/core/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java @@ -19,6 +19,9 @@ package org.elasticsearch.search.rescore; +import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; import org.apache.lucene.search.TopDocs; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.component.AbstractComponent; @@ -47,10 +50,31 @@ public void execute(SearchContext context) { TopDocs topDocs = context.queryResult().topDocs(); for (RescoreContext ctx : context.rescore()) { topDocs = ctx.rescorer().rescore(topDocs, context.searcher(), ctx); + // It is the responsibility of the rescorer to sort the resulted top docs, + // here we only assert that this condition is met. + assert context.sort() == null && topDocsSortedByScore(topDocs): "topdocs should be sorted after rescore"; } context.queryResult().topDocs(topDocs, context.queryResult().sortValueFormats()); } catch (IOException e) { throw new ElasticsearchException("Rescore Phase Failed", e); } } + + /** + * Returns true if the provided docs are sorted by score. 
+ */ + private boolean topDocsSortedByScore(TopDocs topDocs) { + if (topDocs == null || topDocs.scoreDocs == null || topDocs.scoreDocs.length < 2) { + return true; + } + float lastScore = topDocs.scoreDocs[0].score; + for (int i = 1; i < topDocs.scoreDocs.length; i++) { + ScoreDoc doc = topDocs.scoreDocs[i]; + if (Float.compare(doc.score, lastScore) > 0) { + return false; + } + lastScore = doc.score; + } + return true; + } } diff --git a/core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java index 6b2c82a7b672a..529cb4e86ac88 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java @@ -19,8 +19,6 @@ package org.elasticsearch.search.sort; -import static org.elasticsearch.search.sort.NestedSortBuilder.NESTED_FIELD; - import org.apache.lucene.search.SortField; import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; @@ -46,6 +44,8 @@ import java.io.IOException; import java.util.Objects; +import static org.elasticsearch.search.sort.NestedSortBuilder.NESTED_FIELD; + /** * A sort builder to sort based on a document field. */ @@ -91,7 +91,9 @@ public FieldSortBuilder(FieldSortBuilder template) { } this.setNestedFilter(template.getNestedFilter()); this.setNestedPath(template.getNestedPath()); - this.setNestedSort(template.getNestedSort()); + if (template.getNestedSort() != null) { + this.setNestedSort(template.getNestedSort()); + }; } /** @@ -203,10 +205,13 @@ public SortMode sortMode() { * Sets the nested filter that the nested objects should match with in order * to be taken into account for sorting. * - * TODO should the above getters and setters be deprecated/ changed in - * favour of real getters and setters? 
+ * @deprecated set nested sort with {@link #setNestedSort(NestedSortBuilder)} and retrieve with {@link #getNestedSort()} */ + @Deprecated public FieldSortBuilder setNestedFilter(QueryBuilder nestedFilter) { + if (this.nestedSort != null) { + throw new IllegalArgumentException("Setting both nested_path/nested_filter and nested not allowed"); + } this.nestedFilter = nestedFilter; return this; } @@ -214,7 +219,10 @@ public FieldSortBuilder setNestedFilter(QueryBuilder nestedFilter) { /** * Returns the nested filter that the nested objects should match with in * order to be taken into account for sorting. + * + * @deprecated set nested sort with {@link #setNestedSort(NestedSortBuilder)} and retrieve with {@link #getNestedSort()} */ + @Deprecated public QueryBuilder getNestedFilter() { return this.nestedFilter; } @@ -223,8 +231,14 @@ public QueryBuilder getNestedFilter() { * Sets the nested path if sorting occurs on a field that is inside a nested * object. By default when sorting on a field inside a nested object, the * nearest upper nested object is selected as nested path. + * + * @deprecated set nested sort with {@link #setNestedSort(NestedSortBuilder)} and retrieve with {@link #getNestedSort()} */ + @Deprecated public FieldSortBuilder setNestedPath(String nestedPath) { + if (this.nestedSort != null) { + throw new IllegalArgumentException("Setting both nested_path/nested_filter and nested not allowed"); + } this.nestedPath = nestedPath; return this; } @@ -232,16 +246,30 @@ public FieldSortBuilder setNestedPath(String nestedPath) { /** * Returns the nested path if sorting occurs in a field that is inside a * nested object. 
+ * @deprecated set nested sort with {@link #setNestedSort(NestedSortBuilder)} and retrieve with {@link #getNestedSort()} */ + @Deprecated public String getNestedPath() { return this.nestedPath; } + /** + * Returns the {@link NestedSortBuilder} + */ public NestedSortBuilder getNestedSort() { return this.nestedSort; } + /** + * Sets the {@link NestedSortBuilder} to be used for fields that are inside a nested + * object. The {@link NestedSortBuilder} takes a `path` argument and an optional + * nested filter that the nested objects should match with in + * order to be taken into account for sorting. + */ public FieldSortBuilder setNestedSort(final NestedSortBuilder nestedSort) { + if (this.nestedFilter != null || this.nestedPath != null) { + throw new IllegalArgumentException("Setting both nested_path/nested_filter and nested not allowed"); + } this.nestedSort = nestedSort; return this; } @@ -380,14 +408,22 @@ public static FieldSortBuilder fromXContent(XContentParser parser, String fieldN } @Override - public SortBuilder rewrite(QueryRewriteContext ctx) throws IOException { - if (nestedFilter == null) { + public FieldSortBuilder rewrite(QueryRewriteContext ctx) throws IOException { + if (nestedFilter == null && nestedSort == null) { return this; } - QueryBuilder rewrite = nestedFilter.rewrite(ctx); - if (nestedFilter == rewrite) { - return this; + if (nestedFilter != null) { + QueryBuilder rewrite = nestedFilter.rewrite(ctx); + if (nestedFilter == rewrite) { + return this; + } + return new FieldSortBuilder(this).setNestedFilter(rewrite); + } else { + NestedSortBuilder rewrite = nestedSort.rewrite(ctx); + if (nestedSort == rewrite) { + return this; + } + return new FieldSortBuilder(this).setNestedSort(rewrite); } - return new FieldSortBuilder(this).setNestedFilter(rewrite); } } diff --git a/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java index 
da7864b80aece..8dcd2fc766f4e 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java @@ -300,10 +300,17 @@ public SortMode sortMode() { } /** - * Sets the nested filter that the nested objects should match with in order to be taken into account - * for sorting. - */ + * Sets the nested filter that the nested objects should match with in order to + * be taken into account for sorting. + * + * @deprecated set nested sort with {@link #setNestedSort(NestedSortBuilder)} + * and retrieve with {@link #getNestedSort()} + **/ + @Deprecated public GeoDistanceSortBuilder setNestedFilter(QueryBuilder nestedFilter) { + if (this.nestedSort != null) { + throw new IllegalArgumentException("Setting both nested_path/nested_filter and nested not allowed"); + } this.nestedFilter = nestedFilter; return this; } @@ -311,7 +318,10 @@ public GeoDistanceSortBuilder setNestedFilter(QueryBuilder nestedFilter) { /** * Returns the nested filter that the nested objects should match with in order to be taken into account * for sorting. + * @deprecated set nested sort with {@link #setNestedSort(NestedSortBuilder)} + * and retrieve with {@link #getNestedSort()} **/ + @Deprecated public QueryBuilder getNestedFilter() { return this.nestedFilter; } @@ -319,8 +329,14 @@ public QueryBuilder getNestedFilter() { /** * Sets the nested path if sorting occurs on a field that is inside a nested object. By default when sorting on a * field inside a nested object, the nearest upper nested object is selected as nested path. 
- */ + * @deprecated set nested sort with {@link #setNestedSort(NestedSortBuilder)} + * and retrieve with {@link #getNestedSort()} + **/ + @Deprecated public GeoDistanceSortBuilder setNestedPath(String nestedPath) { + if (this.nestedSort != null) { + throw new IllegalArgumentException("Setting both nested_path/nested_filter and nested not allowed"); + } this.nestedPath = nestedPath; return this; } @@ -328,16 +344,31 @@ public GeoDistanceSortBuilder setNestedPath(String nestedPath) { /** * Returns the nested path if sorting occurs on a field that is inside a nested object. By default when sorting on a * field inside a nested object, the nearest upper nested object is selected as nested path. - */ + * @deprecated set nested sort with {@link #setNestedSort(NestedSortBuilder)} + * and retrieve with {@link #getNestedSort()} + **/ + @Deprecated public String getNestedPath() { return this.nestedPath; } + /** + * Returns the {@link NestedSortBuilder} + */ public NestedSortBuilder getNestedSort() { return this.nestedSort; } + /** + * Sets the {@link NestedSortBuilder} to be used for fields that are inside a nested + * object. The {@link NestedSortBuilder} takes a `path` argument and an optional + * nested filter that the nested objects should match with in + * order to be taken into account for sorting. 
+ */ public GeoDistanceSortBuilder setNestedSort(final NestedSortBuilder nestedSort) { + if (this.nestedFilter != null || this.nestedPath != null) { + throw new IllegalArgumentException("Setting both nested_path/nested_filter and nested not allowed"); + } this.nestedSort = nestedSort; return this; } @@ -445,7 +476,7 @@ public static GeoDistanceSortBuilder fromXContent(XContentParser parser, String fieldName = currentName; } else if (token == XContentParser.Token.START_OBJECT) { if (NESTED_FILTER_FIELD.match(currentName)) { - DEPRECATION_LOGGER.deprecated("[nested_filter] has been deprecated in favour for the [nested] parameter"); + DEPRECATION_LOGGER.deprecated("[nested_filter] has been deprecated in favour of the [nested] parameter"); nestedFilter = parseInnerQueryBuilder(parser); } else if (NESTED_FIELD.match(currentName)) { nestedSort = NestedSortBuilder.fromXContent(parser); @@ -475,7 +506,7 @@ public static GeoDistanceSortBuilder fromXContent(XContentParser parser, String } else if (SORTMODE_FIELD.match(currentName)) { sortMode = SortMode.fromString(parser.text()); } else if (NESTED_PATH_FIELD.match(currentName)) { - DEPRECATION_LOGGER.deprecated("[nested_path] has been deprecated in favor of the [nested] parameter"); + DEPRECATION_LOGGER.deprecated("[nested_path] has been deprecated in favour of the [nested] parameter"); nestedPath = parser.text(); } else if (token == Token.VALUE_STRING){ if (fieldName != null && fieldName.equals(currentName) == false) { @@ -650,14 +681,22 @@ static void parseGeoPoints(XContentParser parser, List geoPoints) thro } @Override - public SortBuilder rewrite(QueryRewriteContext ctx) throws IOException { - if (nestedFilter == null) { + public GeoDistanceSortBuilder rewrite(QueryRewriteContext ctx) throws IOException { + if (nestedFilter == null && nestedSort == null) { return this; } - QueryBuilder rewrite = nestedFilter.rewrite(ctx); - if (nestedFilter == rewrite) { - return this; + if (nestedFilter != null) { + QueryBuilder 
rewrite = nestedFilter.rewrite(ctx); + if (nestedFilter == rewrite) { + return this; + } + return new GeoDistanceSortBuilder(this).setNestedFilter(rewrite); + } else { + NestedSortBuilder rewrite = nestedSort.rewrite(ctx); + if (nestedSort == rewrite) { + return this; + } + return new GeoDistanceSortBuilder(this).setNestedSort(rewrite); } - return new GeoDistanceSortBuilder(this).setNestedFilter(rewrite); } } diff --git a/core/src/main/java/org/elasticsearch/search/sort/NestedSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/NestedSortBuilder.java index 941ac2909ab0d..a6ad028403453 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/NestedSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/NestedSortBuilder.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; import java.io.IOException; import java.util.Objects; @@ -149,4 +150,23 @@ public boolean equals(final Object obj) { public int hashCode() { return Objects.hash(path, filter, nestedSort); } + + public NestedSortBuilder rewrite(QueryRewriteContext ctx) throws IOException { + if (filter == null && nestedSort == null) { + return this; + } + QueryBuilder rewriteFilter = this.filter; + NestedSortBuilder rewriteNested = this.nestedSort; + if (filter != null) { + rewriteFilter = filter.rewrite(ctx); + } + if (nestedSort != null) { + rewriteNested = nestedSort.rewrite(ctx); + } + if (rewriteFilter != this.filter || rewriteNested != this.nestedSort) { + return new NestedSortBuilder(this.path).setFilter(rewriteFilter).setNestedSort(rewriteNested); + } else { + return this; + } + } } diff --git a/core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java index 2a6eb3a561720..c27979fdc748e 100644 --- 
a/core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java @@ -124,7 +124,7 @@ public String getWriteableName() { } @Override - public SortBuilder rewrite(QueryRewriteContext ctx) throws IOException { + public ScoreSortBuilder rewrite(QueryRewriteContext ctx) throws IOException { return this; } } diff --git a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java index e9f28f4617ecf..331988a183fa9 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java @@ -176,15 +176,24 @@ public SortMode sortMode() { /** * Sets the nested filter that the nested objects should match with in order to be taken into account * for sorting. + * + * @deprecated set nested sort with {@link #setNestedSort(NestedSortBuilder)} and retrieve with {@link #getNestedSort()} */ + @Deprecated public ScriptSortBuilder setNestedFilter(QueryBuilder nestedFilter) { + if (this.nestedSort != null) { + throw new IllegalArgumentException("Setting both nested_path/nested_filter and nested not allowed"); + } this.nestedFilter = nestedFilter; return this; } /** * Gets the nested filter. + * + * @deprecated set nested sort with {@link #setNestedSort(NestedSortBuilder)} and retrieve with {@link #getNestedSort()} */ + @Deprecated public QueryBuilder getNestedFilter() { return this.nestedFilter; } @@ -192,24 +201,45 @@ public QueryBuilder getNestedFilter() { /** * Sets the nested path if sorting occurs on a field that is inside a nested object. For sorting by script this * needs to be specified. 
+ * + * @deprecated set nested sort with {@link #setNestedSort(NestedSortBuilder)} and retrieve with {@link #getNestedSort()} */ + @Deprecated public ScriptSortBuilder setNestedPath(String nestedPath) { + if (this.nestedSort != null) { + throw new IllegalArgumentException("Setting both nested_path/nested_filter and nested not allowed"); + } this.nestedPath = nestedPath; return this; } /** * Gets the nested path. + * + * @deprecated set nested sort with {@link #setNestedSort(NestedSortBuilder)} and retrieve with {@link #getNestedSort()} */ + @Deprecated public String getNestedPath() { return this.nestedPath; } + /** + * Returns the {@link NestedSortBuilder} + */ public NestedSortBuilder getNestedSort() { return this.nestedSort; } + /** + * Sets the {@link NestedSortBuilder} to be used for fields that are inside a nested + * object. The {@link NestedSortBuilder} takes a `path` argument and an optional + * nested filter that the nested objects should match with in + * order to be taken into account for sorting. 
+ */ public ScriptSortBuilder setNestedSort(final NestedSortBuilder nestedSort) { + if (this.nestedFilter != null || this.nestedPath != null) { + throw new IllegalArgumentException("Setting both nested_path/nested_filter and nested not allowed"); + } this.nestedSort = nestedSort; return this; } @@ -420,14 +450,22 @@ public String toString() { } @Override - public SortBuilder rewrite(QueryRewriteContext ctx) throws IOException { - if (nestedFilter == null) { + public ScriptSortBuilder rewrite(QueryRewriteContext ctx) throws IOException { + if (nestedFilter == null && nestedSort == null) { return this; } - QueryBuilder rewrite = nestedFilter.rewrite(ctx); - if (nestedFilter == rewrite) { - return this; + if (nestedFilter != null) { + QueryBuilder rewrite = nestedFilter.rewrite(ctx); + if (nestedFilter == rewrite) { + return this; + } + return new ScriptSortBuilder(this).setNestedFilter(rewrite); + } else { + NestedSortBuilder rewrite = nestedSort.rewrite(ctx); + if (nestedSort == rewrite) { + return this; + } + return new ScriptSortBuilder(this).setNestedSort(rewrite); } - return new ScriptSortBuilder(this).setNestedFilter(rewrite); } } diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java index 0b127b2eeef7d..5690acd7abd97 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java @@ -18,17 +18,16 @@ */ package org.elasticsearch.search.suggest.completion; +import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.BulkScorer; import org.apache.lucene.search.CollectionTerminatedException; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Weight; -import org.apache.lucene.search.suggest.Lookup; import 
org.apache.lucene.search.suggest.document.CompletionQuery; import org.apache.lucene.search.suggest.document.TopSuggestDocs; import org.apache.lucene.search.suggest.document.TopSuggestDocsCollector; import org.apache.lucene.util.CharsRefBuilder; -import org.apache.lucene.util.PriorityQueue; import org.elasticsearch.common.text.Text; import org.elasticsearch.index.mapper.CompletionFieldMapper; import org.elasticsearch.search.suggest.Suggest; @@ -53,12 +52,14 @@ protected Suggest.Suggestion getContexts() { } } - private static final class SuggestDocPriorityQueue extends PriorityQueue { + private final Map docsMap; - SuggestDocPriorityQueue(int maxSize) { - super(maxSize); - } - - @Override - protected boolean lessThan(SuggestDoc a, SuggestDoc b) { - if (a.score == b.score) { - int cmp = Lookup.CHARSEQUENCE_COMPARATOR.compare(a.key, b.key); - if (cmp == 0) { - // prefer smaller doc id, in case of a tie - return a.doc > b.doc; - } else { - return cmp > 0; - } - } - return a.score < b.score; - } - - public SuggestDoc[] getResults() { - int size = size(); - SuggestDoc[] res = new SuggestDoc[size]; - for (int i = size - 1; i >= 0; i--) { - res[i] = pop(); - } - return res; - } - } - - private final int num; - private final SuggestDocPriorityQueue pq; - private final Map scoreDocMap; - - // TODO: expose dup removal - - TopDocumentsCollector(int num) { - super(1, false); // TODO hack, we don't use the underlying pq, so we allocate a size of 1 - this.num = num; - this.scoreDocMap = new LinkedHashMap<>(num); - this.pq = new SuggestDocPriorityQueue(num); - } - - @Override - public int getCountToCollect() { - // This is only needed because we initialize - // the base class with 1 instead of the actual num - return num; - } - - - @Override - protected void doSetNextReader(LeafReaderContext context) throws IOException { - super.doSetNextReader(context); - updateResults(); - } - - private void updateResults() { - for (SuggestDoc suggestDoc : scoreDocMap.values()) { - if 
(pq.insertWithOverflow(suggestDoc) == suggestDoc) { - break; - } - } - scoreDocMap.clear(); + TopDocumentsCollector(int num, boolean skipDuplicates) { + super(Math.max(1, num), skipDuplicates); + this.docsMap = new LinkedHashMap<>(num); } @Override public void collect(int docID, CharSequence key, CharSequence context, float score) throws IOException { - if (scoreDocMap.containsKey(docID)) { - SuggestDoc suggestDoc = scoreDocMap.get(docID); - suggestDoc.add(key, context, score); - } else if (scoreDocMap.size() <= num) { - scoreDocMap.put(docID, new SuggestDoc(docBase + docID, key, context, score)); + int globalDoc = docID + docBase; + if (docsMap.containsKey(globalDoc)) { + docsMap.get(globalDoc).add(key, context, score); } else { - throw new CollectionTerminatedException(); + docsMap.put(globalDoc, new SuggestDoc(globalDoc, key, context, score)); + super.collect(docID, key, context, score); } } @Override public TopSuggestDocs get() throws IOException { - updateResults(); // to empty the last set of collected suggest docs - TopSuggestDocs.SuggestScoreDoc[] suggestScoreDocs = pq.getResults(); - if (suggestScoreDocs.length > 0) { - return new TopSuggestDocs(suggestScoreDocs.length, suggestScoreDocs, suggestScoreDocs[0].score); - } else { + TopSuggestDocs entries = super.get(); + if (entries.scoreDocs.length == 0) { return TopSuggestDocs.EMPTY; } + // The parent class returns suggestions, not documents, and dedup only the surface form (without contexts). + // The following code groups suggestions matching different contexts by document id and dedup the surface form + contexts + // if needed (skip_duplicates). + int size = entries.scoreDocs.length; + final List suggestDocs = new ArrayList(size); + final CharArraySet seenSurfaceForms = doSkipDuplicates() ? 
new CharArraySet(size, false) : null; + for (TopSuggestDocs.SuggestScoreDoc suggestEntry : entries.scoreLookupDocs()) { + final SuggestDoc suggestDoc; + if (docsMap != null) { + suggestDoc = docsMap.get(suggestEntry.doc); + } else { + suggestDoc = new SuggestDoc(suggestEntry.doc, suggestEntry.key, suggestEntry.context, suggestEntry.score); + } + if (doSkipDuplicates()) { + if (seenSurfaceForms.contains(suggestDoc.key)) { + continue; + } + seenSurfaceForms.add(suggestDoc.key); + } + suggestDocs.add(suggestDoc); + } + return new TopSuggestDocs((int) entries.totalHits, + suggestDocs.toArray(new TopSuggestDocs.SuggestScoreDoc[0]), entries.getMaxScore()); } } } diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java index 229b77aad2850..6d5cd9e588a36 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java @@ -18,8 +18,10 @@ */ package org.elasticsearch.search.suggest.completion; +import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.suggest.Lookup; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -68,11 +70,36 @@ public final class CompletionSuggestion extends Suggest.Suggestion> toRe // the global top size entries are collected from the shard results // using a priority queue OptionPriorityQueue priorityQueue = new OptionPriorityQueue(leader.getSize(), COMPARATOR); + // Dedup duplicate suggestions (based on the surface form) if skip duplicates is activated + final CharArraySet seenSurfaceForms = leader.skipDuplicates ? 
new CharArraySet(leader.getSize(), false) : null; for (Suggest.Suggestion suggestion : toReduce) { assert suggestion.getName().equals(name) : "name should be identical across all suggestions"; for (Entry.Option option : ((CompletionSuggestion) suggestion).getOptions()) { + if (leader.skipDuplicates) { + assert ((CompletionSuggestion) suggestion).skipDuplicates; + String text = option.getText().string(); + if (seenSurfaceForms.contains(text)) { + continue; + } + seenSurfaceForms.add(text); + } if (option == priorityQueue.insertWithOverflow(option)) { // if the current option has overflown from pq, // we can assume all of the successive options @@ -157,7 +194,7 @@ public static CompletionSuggestion reduceTo(List> toRe } } } - final CompletionSuggestion suggestion = new CompletionSuggestion(leader.getName(), leader.getSize()); + final CompletionSuggestion suggestion = new CompletionSuggestion(leader.getName(), leader.getSize(), leader.skipDuplicates); final Entry entry = new Entry(leaderEntry.getText(), leaderEntry.getOffset(), leaderEntry.getLength()); Collections.addAll(entry.getOptions(), priorityQueue.get()); suggestion.addTerm(entry); diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionBuilder.java b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionBuilder.java index 462aa8e271bad..224204bfc8dd3 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionBuilder.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.suggest.completion; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; @@ -57,6 +58,7 @@ public class CompletionSuggestionBuilder extends 
SuggestionBuilderfalse. + */ + public CompletionSuggestionBuilder skipDuplicates(boolean skipDuplicates) { + this.skipDuplicates = skipDuplicates; + return this; + } + private static class InnerBuilder extends CompletionSuggestionBuilder { private String field; @@ -231,6 +257,9 @@ protected XContentBuilder innerToXContent(XContentBuilder builder, Params params if (regexOptions != null) { regexOptions.toXContent(builder, params); } + if (skipDuplicates) { + builder.field(SKIP_DUPLICATES_FIELD.getPreferredName(), skipDuplicates); + } if (contextBytes != null) { builder.rawField(CONTEXTS_FIELD.getPreferredName(), contextBytes); } @@ -255,8 +284,12 @@ public SuggestionContext build(QueryShardContext context) throws IOException { // copy over common settings to each suggestion builder final MapperService mapperService = context.getMapperService(); populateCommonFields(mapperService, suggestionContext); + suggestionContext.setSkipDuplicates(skipDuplicates); suggestionContext.setFuzzyOptions(fuzzyOptions); suggestionContext.setRegexOptions(regexOptions); + if (shardSize != null) { + suggestionContext.setShardSize(shardSize); + } MappedFieldType mappedFieldType = mapperService.fullName(suggestionContext.getField()); if (mappedFieldType == null || mappedFieldType instanceof CompletionFieldMapper.CompletionFieldType == false) { throw new IllegalArgumentException("Field [" + suggestionContext.getField() + "] is not a completion suggest field"); @@ -302,13 +335,14 @@ public String getWriteableName() { @Override protected boolean doEquals(CompletionSuggestionBuilder other) { - return Objects.equals(fuzzyOptions, other.fuzzyOptions) && + return skipDuplicates == other.skipDuplicates && + Objects.equals(fuzzyOptions, other.fuzzyOptions) && Objects.equals(regexOptions, other.regexOptions) && Objects.equals(contextBytes, other.contextBytes); } @Override protected int doHashCode() { - return Objects.hash(fuzzyOptions, regexOptions, contextBytes); + return Objects.hash(fuzzyOptions, 
regexOptions, contextBytes, skipDuplicates); } } diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionContext.java b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionContext.java index b12b90de107ed..e7c0b45745bd6 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionContext.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionContext.java @@ -40,6 +40,7 @@ protected CompletionSuggestionContext(QueryShardContext shardContext) { private CompletionFieldMapper.CompletionFieldType fieldType; private FuzzyOptions fuzzyOptions; private RegexOptions regexOptions; + private boolean skipDuplicates; private Map> queryContexts = Collections.emptyMap(); CompletionFieldMapper.CompletionFieldType getFieldType() { @@ -62,6 +63,10 @@ void setQueryContexts(Map> que this.queryContexts = queryContexts; } + void setSkipDuplicates(boolean skipDuplicates) { + this.skipDuplicates = skipDuplicates; + } + public FuzzyOptions getFuzzyOptions() { return fuzzyOptions; } @@ -74,6 +79,10 @@ public Map> getQueryContexts() return queryContexts; } + public boolean isSkipDuplicates() { + return skipDuplicates; + } + CompletionQuery toQuery() { CompletionFieldMapper.CompletionFieldType fieldType = getFieldType(); final CompletionQuery query; diff --git a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java index 8ff86e593c068..1af4f101e04a2 100644 --- a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -194,7 +194,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i protected final NetworkService networkService; protected final Set profileSettings; - protected volatile TransportServiceAdapter transportServiceAdapter; + protected volatile TransportService 
transportService; // node id to actual channel protected final ConcurrentMap connectedNodes = newConcurrentMap(); @@ -270,11 +270,11 @@ public CircuitBreaker getInFlightRequestBreaker() { } @Override - public void transportServiceAdapter(TransportServiceAdapter service) { + public void setTransportService(TransportService service) { if (service.getRequestHandler(HANDSHAKE_ACTION_NAME) != null) { throw new IllegalStateException(HANDSHAKE_ACTION_NAME + " is a reserved request handler and must not be registered"); } - this.transportServiceAdapter = service; + this.transportService = service; } private static class HandshakeResponseHandler implements TransportResponseHandler { @@ -444,7 +444,7 @@ public void close() throws IOException { try { closeChannels(Arrays.stream(channels).filter(Objects::nonNull).collect(Collectors.toList()), false); } finally { - transportServiceAdapter.onConnectionClosed(this); + transportService.onConnectionClosed(this); } } } @@ -500,7 +500,7 @@ public void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfil logger.debug("connected to node [{}]", node); } try { - transportServiceAdapter.onNodeConnected(node); + transportService.onNodeConnected(node); } finally { if (nodeChannels.isClosed()) { // we got closed concurrently due to a disconnect or some other event on the channel. 
@@ -512,7 +512,7 @@ public void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfil // try to remove it first either way one of the two wins even if the callback has run before we even added the // tuple to the map since in that case we remove it here again if (connectedNodes.remove(node, nodeChannels)) { - transportServiceAdapter.onNodeDisconnected(node); + transportService.onNodeDisconnected(node); } throw new NodeNotConnectedException(node, "connection concurrently closed"); } @@ -597,7 +597,7 @@ public final NodeChannels openConnection(DiscoveryNode node, ConnectionProfile c connectTimeout : connectionProfile.getHandshakeTimeout(); final Version version = executeHandshake(node, channel, handshakeTimeout); nodeChannels = new NodeChannels(nodeChannels, version); // clone the channels - we now have the correct version - transportServiceAdapter.onConnectionOpened(nodeChannels); + transportService.onConnectionOpened(nodeChannels); connectionRef.set(nodeChannels); success = true; return nodeChannels; @@ -625,7 +625,7 @@ private void disconnectFromNodeCloseAndNotify(DiscoveryNode node, NodeChannels n if (closeLock.readLock().tryLock()) { try { if (connectedNodes.remove(node, nodeChannels)) { - transportServiceAdapter.onNodeDisconnected(node); + transportService.onNodeDisconnected(node); } } finally { closeLock.readLock().unlock(); @@ -665,7 +665,7 @@ public void disconnectFromNode(DiscoveryNode node) { } finally { closeLock.readLock().unlock(); if (nodeChannels != null) { // if we found it and removed it we close and notify - IOUtils.closeWhileHandlingException(nodeChannels, () -> transportServiceAdapter.onNodeDisconnected(node)); + IOUtils.closeWhileHandlingException(nodeChannels, () -> transportService.onNodeDisconnected(node)); } } } @@ -916,7 +916,7 @@ protected final void doStop() { Map.Entry next = iterator.next(); try { IOUtils.closeWhileHandlingException(next.getValue()); - transportServiceAdapter.onNodeDisconnected(next.getKey()); + 
transportService.onNodeDisconnected(next.getKey()); } finally { iterator.remove(); } @@ -1078,7 +1078,7 @@ private void sendRequestToChannel(final DiscoveryNode node, final Channel target final TransportRequestOptions finalOptions = options; // this might be called in a different thread SendListener onRequestSent = new SendListener(stream, - () -> transportServiceAdapter.onRequestSent(node, requestId, action, request, finalOptions), message.length()); + () -> transportService.onRequestSent(node, requestId, action, request, finalOptions), message.length()); internalSendMessage(targetChannel, message, onRequestSent); addedReleaseListener = true; } finally { @@ -1125,7 +1125,7 @@ public void sendErrorResponse(Version nodeVersion, Channel channel, final Except final BytesReference header = buildHeader(requestId, status, nodeVersion, bytes.length()); CompositeBytesReference message = new CompositeBytesReference(header, bytes); SendListener onResponseSent = new SendListener(null, - () -> transportServiceAdapter.onResponseSent(requestId, action, error), message.length()); + () -> transportService.onResponseSent(requestId, action, error), message.length()); internalSendMessage(channel, message, onResponseSent); } } @@ -1160,7 +1160,7 @@ private void sendResponse(Version nodeVersion, Channel channel, final TransportR final TransportResponseOptions finalOptions = options; // this might be called in a different thread SendListener listener = new SendListener(stream, - () -> transportServiceAdapter.onResponseSent(requestId, action, response, finalOptions), message.length()); + () -> transportService.onResponseSent(requestId, action, response, finalOptions), message.length()); internalSendMessage(channel, message, listener); addedReleaseListener = true; } finally { @@ -1356,14 +1356,14 @@ public final void messageReceived(BytesReference reference, Channel channel, Str if (isHandshake) { handler = pendingHandshakes.remove(requestId); } else { - TransportResponseHandler 
theHandler = transportServiceAdapter.onResponseReceived(requestId); + TransportResponseHandler theHandler = transportService.onResponseReceived(requestId); if (theHandler == null && TransportStatus.isError(status)) { handler = pendingHandshakes.remove(requestId); } else { handler = theHandler; } } - // ignore if its null, the adapter logs it + // ignore if its null, the service logs it if (handler != null) { if (TransportStatus.isError(status)) { handlerResponseError(streamIn, handler); @@ -1456,7 +1456,7 @@ private void handleException(final TransportResponseHandler handler, Throwable e protected String handleRequest(Channel channel, String profileName, final StreamInput stream, long requestId, int messageLengthBytes, Version version, InetSocketAddress remoteAddress, byte status) throws IOException { final String action = stream.readString(); - transportServiceAdapter.onRequestReceived(requestId, action); + transportService.onRequestReceived(requestId, action); TransportChannel transportChannel = null; try { if (TransportStatus.isHandshake(status)) { @@ -1464,7 +1464,7 @@ protected String handleRequest(Channel channel, String profileName, final Stream sendResponse(version, channel, response, requestId, HANDSHAKE_ACTION_NAME, TransportResponseOptions.EMPTY, TransportStatus.setHandshake((byte) 0)); } else { - final RequestHandlerRegistry reg = transportServiceAdapter.getRequestHandler(action); + final RequestHandlerRegistry reg = transportService.getRequestHandler(action); if (reg == null) { throw new ActionNotFoundTransportException(action); } diff --git a/core/src/main/java/org/elasticsearch/transport/Transport.java b/core/src/main/java/org/elasticsearch/transport/Transport.java index 5d22e156d9d13..b3471b942dae2 100644 --- a/core/src/main/java/org/elasticsearch/transport/Transport.java +++ b/core/src/main/java/org/elasticsearch/transport/Transport.java @@ -40,7 +40,7 @@ public interface Transport extends LifecycleComponent { Setting TRANSPORT_TCP_COMPRESS = 
Setting.boolSetting("transport.tcp.compress", false, Property.NodeScope); - void transportServiceAdapter(TransportServiceAdapter service); + void setTransportService(TransportService service); /** * The address the transport is bound on. diff --git a/core/src/main/java/org/elasticsearch/transport/TransportService.java b/core/src/main/java/org/elasticsearch/transport/TransportService.java index a68e319bb2c11..fa8278dea3669 100644 --- a/core/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportService.java @@ -35,7 +35,6 @@ import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; @@ -106,8 +105,6 @@ protected boolean removeEldestEntry(Map.Entry eldest) { } }); - private final TransportService.Adapter adapter; - public static final TransportInterceptor NOOP_TRANSPORT_INTERCEPTOR = new TransportInterceptor() {}; // tracer log @@ -148,7 +145,7 @@ public void close() throws IOException { * Build the service. * * @param clusterSettings if non null the the {@linkplain TransportService} will register with the {@link ClusterSettings} for settings - * updates for {@link #TRACE_LOG_EXCLUDE_SETTING} and {@link #TRACE_LOG_INCLUDE_SETTING}. + * updates for {@link #TRACE_LOG_EXCLUDE_SETTING} and {@link #TRACE_LOG_INCLUDE_SETTING}. 
*/ public TransportService(Settings settings, Transport transport, ThreadPool threadPool, TransportInterceptor transportInterceptor, Function localNodeFactory, @Nullable ClusterSettings clusterSettings) { @@ -160,7 +157,6 @@ public TransportService(Settings settings, Transport transport, ThreadPool threa setTracerLogInclude(TRACE_LOG_INCLUDE_SETTING.get(settings)); setTracerLogExclude(TRACE_LOG_EXCLUDE_SETTING.get(settings)); tracerLog = Loggers.getLogger(logger, ".tracer"); - adapter = createAdapter(); taskManager = createTaskManager(); this.interceptor = transportInterceptor; this.asyncSender = interceptor.interceptSender(this::sendRequestInternal); @@ -187,10 +183,6 @@ public TaskManager getTaskManager() { return taskManager; } - protected Adapter createAdapter() { - return new Adapter(); - } - protected TaskManager createTaskManager() { return new TaskManager(settings); } @@ -205,7 +197,7 @@ void setTracerLogExclude(List tracerLogExclude) { @Override protected void doStart() { - transport.transportServiceAdapter(adapter); + transport.setTransportService(this); transport.start(); if (transport.boundAddress() != null && logger.isInfoEnabled()) { @@ -632,11 +624,11 @@ protected void doRun() throws Exception { } private void sendLocalRequest(long requestId, final String action, final TransportRequest request, TransportRequestOptions options) { - final DirectResponseChannel channel = new DirectResponseChannel(logger, localNode, action, requestId, adapter, threadPool); + final DirectResponseChannel channel = new DirectResponseChannel(logger, localNode, action, requestId, this, threadPool); try { - adapter.onRequestSent(localNode, requestId, action, request, options); - adapter.onRequestReceived(requestId, action); - final RequestHandlerRegistry reg = adapter.getRequestHandler(action); + onRequestSent(localNode, requestId, action, request, options); + onRequestReceived(requestId, action); + final RequestHandlerRegistry reg = getRequestHandler(action); if (reg == null) 
{ throw new ActionNotFoundTransportException("Action [" + action + "] not found"); } @@ -782,177 +774,171 @@ private void registerRequestHandler(RequestHa } } - protected RequestHandlerRegistry getRequestHandler(String action) { - return requestHandlers.get(action); + /** called by the {@link Transport} implementation once a request has been sent */ + void onRequestSent(DiscoveryNode node, long requestId, String action, TransportRequest request, + TransportRequestOptions options) { + if (traceEnabled() && shouldTraceAction(action)) { + traceRequestSent(node, requestId, action, options); + } } - protected class Adapter implements TransportServiceAdapter { + protected boolean traceEnabled() { + return tracerLog.isTraceEnabled(); + } - @Override - public void onRequestSent(DiscoveryNode node, long requestId, String action, TransportRequest request, - TransportRequestOptions options) { - if (traceEnabled() && shouldTraceAction(action)) { - traceRequestSent(node, requestId, action, options); - } + /** called by the {@link Transport} implementation once a response was sent to calling node */ + void onResponseSent(long requestId, String action, TransportResponse response, TransportResponseOptions options) { + if (traceEnabled() && shouldTraceAction(action)) { + traceResponseSent(requestId, action); } + } - protected boolean traceEnabled() { - return tracerLog.isTraceEnabled(); + /** called by the {@link Transport} implementation after an exception was sent as a response to an incoming request */ + void onResponseSent(long requestId, String action, Exception e) { + if (traceEnabled() && shouldTraceAction(action)) { + traceResponseSent(requestId, action, e); } + } - @Override - public void onResponseSent(long requestId, String action, TransportResponse response, TransportResponseOptions options) { - if (traceEnabled() && shouldTraceAction(action)) { - traceResponseSent(requestId, action); - } - } + protected void traceResponseSent(long requestId, String action, Exception e) 
{ + tracerLog.trace( + (org.apache.logging.log4j.util.Supplier) + () -> new ParameterizedMessage("[{}][{}] sent error response", requestId, action), e); + } - @Override - public void onResponseSent(long requestId, String action, Exception e) { - if (traceEnabled() && shouldTraceAction(action)) { - traceResponseSent(requestId, action, e); - } + /** + * called by the {@link Transport} implementation when an incoming request arrives but before + * any parsing of it has happened (with the exception of the requestId and action) + */ + void onRequestReceived(long requestId, String action) { + try { + blockIncomingRequestsLatch.await(); + } catch (InterruptedException e) { + logger.trace("interrupted while waiting for incoming requests block to be removed"); } - - protected void traceResponseSent(long requestId, String action, Exception e) { - tracerLog.trace( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage("[{}][{}] sent error response", requestId, action), e); + if (traceEnabled() && shouldTraceAction(action)) { + traceReceivedRequest(requestId, action); } + } - @Override - public void onRequestReceived(long requestId, String action) { - try { - blockIncomingRequestsLatch.await(); - } catch (InterruptedException e) { - logger.trace("interrupted while waiting for incoming requests block to be removed"); - } - if (traceEnabled() && shouldTraceAction(action)) { - traceReceivedRequest(requestId, action); - } - } + public RequestHandlerRegistry getRequestHandler(String action) { + return requestHandlers.get(action); + } - @Override - public RequestHandlerRegistry getRequestHandler(String action) { - return requestHandlers.get(action); - } + /** + * called by the {@link Transport} implementation when a response or an exception has been received for a previously + * sent request (before any processing or deserialization was done). Returns the appropriate response handler or null if not + * found. 
+ */ + public TransportResponseHandler onResponseReceived(final long requestId) { + RequestHolder holder = clientHandlers.remove(requestId); - @Override - public TransportResponseHandler onResponseReceived(final long requestId) { - RequestHolder holder = clientHandlers.remove(requestId); + if (holder == null) { + checkForTimeout(requestId); + return null; + } + holder.cancelTimeout(); + if (traceEnabled() && shouldTraceAction(holder.action())) { + traceReceivedResponse(requestId, holder.connection().getNode(), holder.action()); + } + return holder.handler(); + } - if (holder == null) { - checkForTimeout(requestId); - return null; - } - holder.cancelTimeout(); - if (traceEnabled() && shouldTraceAction(holder.action())) { - traceReceivedResponse(requestId, holder.connection().getNode(), holder.action()); - } - return holder.handler(); - } - - protected void checkForTimeout(long requestId) { - // lets see if its in the timeout holder, but sync on mutex to make sure any ongoing timeout handling has finished - final DiscoveryNode sourceNode; - final String action; - assert clientHandlers.get(requestId) == null; - TimeoutInfoHolder timeoutInfoHolder = timeoutInfoHandlers.remove(requestId); - if (timeoutInfoHolder != null) { - long time = System.currentTimeMillis(); - logger.warn("Received response for a request that has timed out, sent [{}ms] ago, timed out [{}ms] ago, " + + private void checkForTimeout(long requestId) { + // lets see if its in the timeout holder, but sync on mutex to make sure any ongoing timeout handling has finished + final DiscoveryNode sourceNode; + final String action; + assert clientHandlers.get(requestId) == null; + TimeoutInfoHolder timeoutInfoHolder = timeoutInfoHandlers.remove(requestId); + if (timeoutInfoHolder != null) { + long time = System.currentTimeMillis(); + logger.warn("Received response for a request that has timed out, sent [{}ms] ago, timed out [{}ms] ago, " + "action [{}], node [{}], id [{}]", time - timeoutInfoHolder.sentTime(), 
time - timeoutInfoHolder.timeoutTime(), - timeoutInfoHolder.action(), timeoutInfoHolder.node(), requestId); - action = timeoutInfoHolder.action(); - sourceNode = timeoutInfoHolder.node(); - } else { - logger.warn("Transport response handler not found of id [{}]", requestId); - action = null; - sourceNode = null; - } - // call tracer out of lock - if (traceEnabled() == false) { - return; - } - if (action == null) { - assert sourceNode == null; - traceUnresolvedResponse(requestId); - } else if (shouldTraceAction(action)) { - traceReceivedResponse(requestId, sourceNode, action); - } + timeoutInfoHolder.action(), timeoutInfoHolder.node(), requestId); + action = timeoutInfoHolder.action(); + sourceNode = timeoutInfoHolder.node(); + } else { + logger.warn("Transport response handler not found of id [{}]", requestId); + action = null; + sourceNode = null; } - - @Override - public void onNodeConnected(final DiscoveryNode node) { - // capture listeners before spawning the background callback so the following pattern won't trigger a call - // connectToNode(); connection is completed successfully - // addConnectionListener(); this listener shouldn't be called - final Stream listenersToNotify = TransportService.this.connectionListeners.stream(); - threadPool.generic().execute(() -> listenersToNotify.forEach(listener -> listener.onNodeConnected(node))); + // call tracer out of lock + if (traceEnabled() == false) { + return; } - - @Override - public void onConnectionOpened(Transport.Connection connection) { - // capture listeners before spawning the background callback so the following pattern won't trigger a call - // connectToNode(); connection is completed successfully - // addConnectionListener(); this listener shouldn't be called - final Stream listenersToNotify = TransportService.this.connectionListeners.stream(); - threadPool.generic().execute(() -> listenersToNotify.forEach(listener -> listener.onConnectionOpened(connection))); + if (action == null) { + assert sourceNode 
== null; + traceUnresolvedResponse(requestId); + } else if (shouldTraceAction(action)) { + traceReceivedResponse(requestId, sourceNode, action); } + } - @Override - public void onNodeDisconnected(final DiscoveryNode node) { - try { - threadPool.generic().execute( () -> { - for (final TransportConnectionListener connectionListener : connectionListeners) { - connectionListener.onNodeDisconnected(node); - } - }); - } catch (EsRejectedExecutionException ex) { - logger.debug("Rejected execution on NodeDisconnected", ex); - } + void onNodeConnected(final DiscoveryNode node) { + // capture listeners before spawning the background callback so the following pattern won't trigger a call + // connectToNode(); connection is completed successfully + // addConnectionListener(); this listener shouldn't be called + final Stream listenersToNotify = TransportService.this.connectionListeners.stream(); + threadPool.generic().execute(() -> listenersToNotify.forEach(listener -> listener.onNodeConnected(node))); + } + + void onConnectionOpened(Transport.Connection connection) { + // capture listeners before spawning the background callback so the following pattern won't trigger a call + // connectToNode(); connection is completed successfully + // addConnectionListener(); this listener shouldn't be called + final Stream listenersToNotify = TransportService.this.connectionListeners.stream(); + threadPool.generic().execute(() -> listenersToNotify.forEach(listener -> listener.onConnectionOpened(connection))); + } + + public void onNodeDisconnected(final DiscoveryNode node) { + try { + threadPool.generic().execute( () -> { + for (final TransportConnectionListener connectionListener : connectionListeners) { + connectionListener.onNodeDisconnected(node); + } + }); + } catch (EsRejectedExecutionException ex) { + logger.debug("Rejected execution on NodeDisconnected", ex); } + } - @Override - public void onConnectionClosed(Transport.Connection connection) { - try { - for (Map.Entry entry : 
clientHandlers.entrySet()) { - RequestHolder holder = entry.getValue(); - if (holder.connection().getCacheKey().equals(connection.getCacheKey())) { - final RequestHolder holderToNotify = clientHandlers.remove(entry.getKey()); - if (holderToNotify != null) { - // callback that an exception happened, but on a different thread since we don't - // want handlers to worry about stack overflows - threadPool.generic().execute(() -> holderToNotify.handler().handleException(new NodeDisconnectedException( - connection.getNode(), holderToNotify.action()))); - } + void onConnectionClosed(Transport.Connection connection) { + try { + for (Map.Entry entry : clientHandlers.entrySet()) { + RequestHolder holder = entry.getValue(); + if (holder.connection().getCacheKey().equals(connection.getCacheKey())) { + final RequestHolder holderToNotify = clientHandlers.remove(entry.getKey()); + if (holderToNotify != null) { + // callback that an exception happened, but on a different thread since we don't + // want handlers to worry about stack overflows + threadPool.generic().execute(() -> holderToNotify.handler().handleException(new NodeDisconnectedException( + connection.getNode(), holderToNotify.action()))); } } - } catch (EsRejectedExecutionException ex) { - logger.debug("Rejected execution on onConnectionClosed", ex); } + } catch (EsRejectedExecutionException ex) { + logger.debug("Rejected execution on onConnectionClosed", ex); } + } - protected void traceReceivedRequest(long requestId, String action) { - tracerLog.trace("[{}][{}] received request", requestId, action); - } - - protected void traceResponseSent(long requestId, String action) { - tracerLog.trace("[{}][{}] sent response", requestId, action); - } + protected void traceReceivedRequest(long requestId, String action) { + tracerLog.trace("[{}][{}] received request", requestId, action); + } - protected void traceReceivedResponse(long requestId, DiscoveryNode sourceNode, String action) { - tracerLog.trace("[{}][{}] received response 
from [{}]", requestId, action, sourceNode); - } + protected void traceResponseSent(long requestId, String action) { + tracerLog.trace("[{}][{}] sent response", requestId, action); + } - protected void traceUnresolvedResponse(long requestId) { - tracerLog.trace("[{}] received response but can't resolve it to a request", requestId); - } + protected void traceReceivedResponse(long requestId, DiscoveryNode sourceNode, String action) { + tracerLog.trace("[{}][{}] received response from [{}]", requestId, action, sourceNode); + } - protected void traceRequestSent(DiscoveryNode node, long requestId, String action, TransportRequestOptions options) { - tracerLog.trace("[{}][{}] sent to [{}] (timeout: [{}])", requestId, action, node, options.timeout()); - } + protected void traceUnresolvedResponse(long requestId) { + tracerLog.trace("[{}] received response but can't resolve it to a request", requestId); + } + protected void traceRequestSent(DiscoveryNode node, long requestId, String action, TransportRequestOptions options) { + tracerLog.trace("[{}][{}] sent to [{}] (timeout: [{}])", requestId, action, node, options.timeout()); } class TimeoutHandler implements Runnable { @@ -1118,16 +1104,16 @@ static class DirectResponseChannel implements TransportChannel { final DiscoveryNode localNode; private final String action; private final long requestId; - final TransportServiceAdapter adapter; + final TransportService service; final ThreadPool threadPool; DirectResponseChannel(Logger logger, DiscoveryNode localNode, String action, long requestId, - TransportServiceAdapter adapter, ThreadPool threadPool) { + TransportService service, ThreadPool threadPool) { this.logger = logger; this.localNode = localNode; this.action = action; this.requestId = requestId; - this.adapter = adapter; + this.service = service; this.threadPool = threadPool; } @@ -1148,9 +1134,9 @@ public void sendResponse(TransportResponse response) throws IOException { @Override public void sendResponse(final 
TransportResponse response, TransportResponseOptions options) throws IOException { - adapter.onResponseSent(requestId, action, response, options); - final TransportResponseHandler handler = adapter.onResponseReceived(requestId); - // ignore if its null, the adapter logs it + service.onResponseSent(requestId, action, response, options); + final TransportResponseHandler handler = service.onResponseReceived(requestId); + // ignore if its null, the service logs it if (handler != null) { final String executor = handler.executor(); if (ThreadPool.Names.SAME.equals(executor)) { @@ -1172,9 +1158,9 @@ protected void processResponse(TransportResponseHandler handler, TransportRespon @Override public void sendResponse(Exception exception) throws IOException { - adapter.onResponseSent(requestId, action, exception); - final TransportResponseHandler handler = adapter.onResponseReceived(requestId); - // ignore if its null, the adapter logs it + service.onResponseSent(requestId, action, exception); + final TransportResponseHandler handler = service.onResponseReceived(requestId); + // ignore if its null, the service logs it if (handler != null) { final RemoteTransportException rtx = wrapInRemote(exception); final String executor = handler.executor(); diff --git a/core/src/main/java/org/elasticsearch/transport/TransportServiceAdapter.java b/core/src/main/java/org/elasticsearch/transport/TransportServiceAdapter.java deleted file mode 100644 index 24a71a99998a4..0000000000000 --- a/core/src/main/java/org/elasticsearch/transport/TransportServiceAdapter.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.transport; - -import org.elasticsearch.cluster.node.DiscoveryNode; - -public interface TransportServiceAdapter extends TransportConnectionListener { - - /** called by the {@link Transport} implementation once a request has been sent */ - void onRequestSent(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options); - - /** called by the {@link Transport} implementation once a response was sent to calling node */ - void onResponseSent(long requestId, String action, TransportResponse response, TransportResponseOptions options); - - /** called by the {@link Transport} implementation after an exception was sent as a response to an incoming request */ - void onResponseSent(long requestId, String action, Exception e); - - /** - * called by the {@link Transport} implementation when a response or an exception has been received for a previously - * sent request (before any processing or deserialization was done). Returns the appropriate response handler or null if not - * found. 
- */ - TransportResponseHandler onResponseReceived(long requestId); - - /** - * called by the {@link Transport} implementation when an incoming request arrives but before - * any parsing of it has happened (with the exception of the requestId and action) - */ - void onRequestReceived(long requestId, String action); - - RequestHandlerRegistry getRequestHandler(String action); -} diff --git a/core/src/main/java/org/joda/time/format/StrictISODateTimeFormat.java b/core/src/main/java/org/joda/time/format/StrictISODateTimeFormat.java index f2be8c9118068..4533b78b84add 100644 --- a/core/src/main/java/org/joda/time/format/StrictISODateTimeFormat.java +++ b/core/src/main/java/org/joda/time/format/StrictISODateTimeFormat.java @@ -32,7 +32,7 @@ * All methods have been marked with an "// ES change" commentary * * In case you compare this with the original ISODateTimeFormat, make sure you use a diff - * call, that ignores whitespaces/tabs/indendetations like 'diff -b' + * call, that ignores whitespaces/tabs/indentations like 'diff -b' */ /** * Factory that creates instances of DateTimeFormatter based on the ISO8601 standard. 
diff --git a/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy b/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy index 79cb42214ddd3..5b94f28254e58 100644 --- a/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy +++ b/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy @@ -63,7 +63,7 @@ grant codeBase "${codebase.mocksocket-1.2.jar}" { permission java.net.SocketPermission "*", "accept,connect"; }; -grant codeBase "${codebase.elasticsearch-rest-client-7.0.0-alpha1-SNAPSHOT.jar}" { +grant codeBase "${codebase.elasticsearch-rest-client}" { // rest makes socket connections for rest tests permission java.net.SocketPermission "*", "connect"; // rest client uses system properties which gets the default proxy diff --git a/core/src/test/java/org/elasticsearch/action/DocWriteResponseTests.java b/core/src/test/java/org/elasticsearch/action/DocWriteResponseTests.java index bb1f2d2a637f5..36a72178bbad6 100644 --- a/core/src/test/java/org/elasticsearch/action/DocWriteResponseTests.java +++ b/core/src/test/java/org/elasticsearch/action/DocWriteResponseTests.java @@ -25,7 +25,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.index.seqno.SequenceNumbersService; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; @@ -42,7 +42,7 @@ public void testGetLocation() { new ShardId("index", "uuid", 0), "type", "id", - SequenceNumbersService.UNASSIGNED_SEQ_NO, + SequenceNumbers.UNASSIGNED_SEQ_NO, 17, 0, Result.CREATED) {}; @@ -56,7 +56,7 @@ public void testGetLocationNonAscii() { new ShardId("index", "uuid", 0), "type", "❤", - SequenceNumbersService.UNASSIGNED_SEQ_NO, + SequenceNumbers.UNASSIGNED_SEQ_NO, 17, 0, Result.CREATED) {}; @@ -70,7 +70,7 @@ public 
void testGetLocationWithSpaces() { new ShardId("index", "uuid", 0), "type", "a b", - SequenceNumbersService.UNASSIGNED_SEQ_NO, + SequenceNumbers.UNASSIGNED_SEQ_NO, 17, 0, Result.CREATED) {}; @@ -88,7 +88,7 @@ public void testToXContentDoesntIncludeForcedRefreshUnlessForced() throws IOExce new ShardId("index", "uuid", 0), "type", "id", - SequenceNumbersService.UNASSIGNED_SEQ_NO, + SequenceNumbers.UNASSIGNED_SEQ_NO, 17, 0, Result.CREATED) { diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java index f9785df6495b2..5732d5cc987ca 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java @@ -52,7 +52,7 @@ public void testInitializingOrRelocatingShardExplanation() throws Exception { ClusterState clusterState = ClusterStateCreationUtils.state("idx", randomBoolean(), shardRoutingState); ShardRouting shard = clusterState.getRoutingTable().index("idx").shard(0).primaryShard(); RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, Collections.emptyList()), - clusterState.getRoutingNodes(), clusterState, null, System.nanoTime(), randomBoolean()); + clusterState.getRoutingNodes(), clusterState, null, System.nanoTime()); ClusterAllocationExplanation cae = TransportClusterAllocationExplainAction.explainShard(shard, allocation, null, randomBoolean(), new TestGatewayAllocator(), new ShardsAllocator() { @Override @@ -165,6 +165,6 @@ public void testFindShardAssignedToNode() { } private static RoutingAllocation routingAllocation(ClusterState clusterState) { - return new RoutingAllocation(NOOP_DECIDERS, clusterState.getRoutingNodes(), clusterState, null, System.nanoTime(), randomBoolean()); + 
return new RoutingAllocation(NOOP_DECIDERS, clusterState.getRoutingNodes(), clusterState, null, System.nanoTime()); } } diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java index de5c6690a34c0..8927fed567ed9 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java @@ -174,10 +174,10 @@ public TestNode(String name, ThreadPool threadPool, Settings settings) { return discoveryNode.get(); }; transportService = new TransportService(settings, - new MockTcpTransport(settings, threadPool, BigArrays.NON_RECYCLING_INSTANCE, new NoneCircuitBreakerService(), + new MockTcpTransport(settings, threadPool, BigArrays.NON_RECYCLING_INSTANCE, new NoneCircuitBreakerService(), new NamedWriteableRegistry(ClusterModule.getNamedWriteables()), new NetworkService(Collections.emptyList())), - threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddressDiscoveryNodeFunction, null) { + threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddressDiscoveryNodeFunction, null) { @Override protected TaskManager createTaskManager() { if (MockTaskManager.USE_MOCK_TASK_MANAGER_SETTING.get(settings)) { diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteTests.java index a9054879941fa..d3a0d12a85332 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteTests.java @@ -150,9 +150,9 @@ public void onFailure(Exception e) { assertNotSame(newState, clusterState); // dry-run=false clusterState = newState; 
routingTable = clusterState.routingTable(); - assertEquals(routingTable.index("idx").shards().size(), 1); - assertEquals(routingTable.index("idx").shard(0).shards().get(0).state(), INITIALIZING); - assertEquals(routingTable.index("idx").shard(0).shards().get(0).unassignedInfo().getNumFailedAllocations(), retries); + assertEquals(1, routingTable.index("idx").shards().size()); + assertEquals(INITIALIZING, routingTable.index("idx").shard(0).shards().get(0).state()); + assertEquals(0, routingTable.index("idx").shard(0).shards().get(0).unassignedInfo().getNumFailedAllocations()); } private ClusterState createInitialClusterState(AllocationService service) { diff --git a/core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java b/core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java index 50fb348834fe3..5141b9cd47187 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java @@ -49,7 +49,7 @@ public class TransportBulkActionTests extends ESTestCase { private TransportService transportService; private ClusterService clusterService; private ThreadPool threadPool; - + private TestTransportBulkAction bulkAction; class TestTransportBulkAction extends TransportBulkAction { @@ -132,4 +132,4 @@ public void testDeleteNonExistingDocExternalGteVersionCreatesIndex() throws Exce throw new AssertionError(exception); })); } -} \ No newline at end of file +} diff --git a/core/src/test/java/org/elasticsearch/action/delete/DeleteResponseTests.java b/core/src/test/java/org/elasticsearch/action/delete/DeleteResponseTests.java index 8f4e22e0fd221..8e40c5ea2aad6 100644 --- a/core/src/test/java/org/elasticsearch/action/delete/DeleteResponseTests.java +++ b/core/src/test/java/org/elasticsearch/action/delete/DeleteResponseTests.java @@ -26,7 +26,7 @@ import org.elasticsearch.common.xcontent.ToXContent; import 
org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.seqno.SequenceNumbersService; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.RandomObjects; @@ -114,8 +114,8 @@ public static Tuple randomDeleteResponse() { int shardId = randomIntBetween(0, 5); String type = randomAlphaOfLength(5); String id = randomAlphaOfLength(5); - long seqNo = randomFrom(SequenceNumbersService.UNASSIGNED_SEQ_NO, randomNonNegativeLong(), (long) randomIntBetween(0, 10000)); - long primaryTerm = seqNo == SequenceNumbersService.UNASSIGNED_SEQ_NO ? 0 : randomIntBetween(1, 10000); + long seqNo = randomFrom(SequenceNumbers.UNASSIGNED_SEQ_NO, randomNonNegativeLong(), (long) randomIntBetween(0, 10000)); + long primaryTerm = seqNo == SequenceNumbers.UNASSIGNED_SEQ_NO ? 0 : randomIntBetween(1, 10000); long version = randomBoolean() ? 
randomNonNegativeLong() : randomIntBetween(0, 10000); boolean found = randomBoolean(); boolean forcedRefresh = randomBoolean(); diff --git a/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java b/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java index 6816068dff453..6191184ef3f7a 100644 --- a/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java @@ -26,10 +26,10 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; -import org.elasticsearch.index.seqno.SequenceNumbersService; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; @@ -37,7 +37,6 @@ import java.io.IOException; import java.io.UnsupportedEncodingException; import java.util.Arrays; -import java.util.Base64; import java.util.HashSet; import java.util.Set; @@ -135,7 +134,7 @@ public void testIndexResponse() { String id = randomAlphaOfLengthBetween(3, 10); long version = randomLong(); boolean created = randomBoolean(); - IndexResponse indexResponse = new IndexResponse(shardId, type, id, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, version, created); + IndexResponse indexResponse = new IndexResponse(shardId, type, id, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, version, created); int total = randomIntBetween(1, 10); int successful = randomIntBetween(1, 10); ReplicationResponse.ShardInfo shardInfo = new ReplicationResponse.ShardInfo(total, successful); @@ -155,7 +154,7 @@ public void testIndexResponse() { 
assertEquals(forcedRefresh, indexResponse.forcedRefresh()); assertEquals("IndexResponse[index=" + shardId.getIndexName() + ",type=" + type + ",id="+ id + ",version=" + version + ",result=" + (created ? "created" : "updated") + - ",seqNo=" + SequenceNumbersService.UNASSIGNED_SEQ_NO + + ",seqNo=" + SequenceNumbers.UNASSIGNED_SEQ_NO + ",primaryTerm=" + 0 + ",shards={\"total\":" + total + ",\"successful\":" + successful + ",\"failed\":0}]", indexResponse.toString()); diff --git a/core/src/test/java/org/elasticsearch/action/index/IndexResponseTests.java b/core/src/test/java/org/elasticsearch/action/index/IndexResponseTests.java index be67834576e2b..926f272ed8339 100644 --- a/core/src/test/java/org/elasticsearch/action/index/IndexResponseTests.java +++ b/core/src/test/java/org/elasticsearch/action/index/IndexResponseTests.java @@ -27,7 +27,7 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.seqno.SequenceNumbersService; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.RandomObjects; @@ -127,8 +127,8 @@ public static Tuple randomIndexResponse() { int shardId = randomIntBetween(0, 5); String type = randomAlphaOfLength(5); String id = randomAlphaOfLength(5); - long seqNo = randomFrom(SequenceNumbersService.UNASSIGNED_SEQ_NO, randomNonNegativeLong(), (long) randomIntBetween(0, 10000)); - long primaryTerm = seqNo == SequenceNumbersService.UNASSIGNED_SEQ_NO ? 0 : randomIntBetween(1, 10000); + long seqNo = randomFrom(SequenceNumbers.UNASSIGNED_SEQ_NO, randomNonNegativeLong(), (long) randomIntBetween(0, 10000)); + long primaryTerm = seqNo == SequenceNumbers.UNASSIGNED_SEQ_NO ? 0 : randomIntBetween(1, 10000); long version = randomBoolean() ? 
randomNonNegativeLong() : randomIntBetween(0, 10000); boolean created = randomBoolean(); boolean forcedRefresh = randomBoolean(); diff --git a/core/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java b/core/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java index e6d1e20147b90..7501a7a90be70 100644 --- a/core/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java @@ -72,7 +72,7 @@ public void setup() { public void testSort() throws Exception { List suggestions = new ArrayList<>(); for (int i = 0; i < randomIntBetween(1, 5); i++) { - suggestions.add(new CompletionSuggestion(randomAlphaOfLength(randomIntBetween(1, 5)), randomIntBetween(1, 20))); + suggestions.add(new CompletionSuggestion(randomAlphaOfLength(randomIntBetween(1, 5)), randomIntBetween(1, 20), false)); } int nShards = randomIntBetween(1, 20); int queryResultSize = randomBoolean() ? 0 : randomIntBetween(1, nShards * 2); @@ -139,7 +139,7 @@ public void testMerge() throws IOException { for (int i = 0; i < randomIntBetween(1, 5); i++) { int size = randomIntBetween(1, 20); maxSuggestSize += size; - suggestions.add(new CompletionSuggestion(randomAlphaOfLength(randomIntBetween(1, 5)), size)); + suggestions.add(new CompletionSuggestion(randomAlphaOfLength(randomIntBetween(1, 5)), size, false)); } int nShards = randomIntBetween(1, 20); int queryResultSize = randomBoolean() ? 
0 : randomIntBetween(1, nShards * 2); @@ -202,7 +202,7 @@ private AtomicArray generateQueryResults(int nShards, List shardSuggestion = new ArrayList<>(); for (CompletionSuggestion completionSuggestion : suggestions) { CompletionSuggestion suggestion = new CompletionSuggestion( - completionSuggestion.getName(), completionSuggestion.getSize()); + completionSuggestion.getName(), completionSuggestion.getSize(), false); final CompletionSuggestion.Entry completionEntry = new CompletionSuggestion.Entry(new Text(""), 0, 5); suggestion.addTerm(completionEntry); int optionSize = randomIntBetween(1, suggestion.getSize()); diff --git a/core/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java b/core/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java index 429266c45892c..50f328db39306 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java @@ -18,11 +18,70 @@ */ package org.elasticsearch.bwcompat; +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.DirectoryStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; + +import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util.TestUtil; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.test.ESIntegTestCase; import static org.hamcrest.Matchers.containsString; -public class RecoveryWithUnsupportedIndicesIT extends StaticIndexBackwardCompatibilityIT { +@LuceneTestCase.SuppressCodecs("*") +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, minNumDataNodes = 0, maxNumDataNodes = 0) +public class RecoveryWithUnsupportedIndicesIT extends ESIntegTestCase { + + /** + * Return settings that could be used to 
start a node that has the given zipped home directory. + */ + protected Settings prepareBackwardsDataDir(Path backwardsIndex, Object... settings) throws IOException { + Path indexDir = createTempDir(); + Path dataDir = indexDir.resolve("data"); + try (InputStream stream = Files.newInputStream(backwardsIndex)) { + TestUtil.unzip(stream, indexDir); + } + assertTrue(Files.exists(dataDir)); + + // list clusters in the datapath, ignoring anything from extrasfs + final Path[] list; + try (DirectoryStream stream = Files.newDirectoryStream(dataDir)) { + List dirs = new ArrayList<>(); + for (Path p : stream) { + if (!p.getFileName().toString().startsWith("extra")) { + dirs.add(p); + } + } + list = dirs.toArray(new Path[0]); + } + + if (list.length != 1) { + StringBuilder builder = new StringBuilder("Backwards index must contain exactly one cluster\n"); + for (Path line : list) { + builder.append(line.toString()).append('\n'); + } + throw new IllegalStateException(builder.toString()); + } + Path src = list[0].resolve(NodeEnvironment.NODES_FOLDER); + Path dest = dataDir.resolve(NodeEnvironment.NODES_FOLDER); + assertTrue(Files.exists(src)); + Files.move(src, dest); + assertFalse(Files.exists(src)); + assertTrue(Files.exists(dest)); + Settings.Builder builder = Settings.builder() + .put(settings) + .put(Environment.PATH_DATA_SETTING.getKey(), dataDir.toAbsolutePath()); + + return builder.build(); + } + public void testUpgradeStartClusterOn_0_20_6() throws Exception { String indexName = "unsupported-0.20.6"; diff --git a/core/src/test/java/org/elasticsearch/bwcompat/StaticIndexBackwardCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/StaticIndexBackwardCompatibilityIT.java deleted file mode 100644 index 3884d3475e12a..0000000000000 --- a/core/src/test/java/org/elasticsearch/bwcompat/StaticIndexBackwardCompatibilityIT.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.bwcompat; - -import org.apache.lucene.util.LuceneTestCase; -import org.elasticsearch.action.admin.indices.get.GetIndexResponse; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.ESIntegTestCase; - -import static org.hamcrest.Matchers.greaterThanOrEqualTo; - -/** - * These tests are against static indexes, built from versions of ES that cannot be upgraded without - * a full cluster restart (ie no wire format compatibility). - */ -@LuceneTestCase.SuppressCodecs("*") -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, minNumDataNodes = 0, maxNumDataNodes = 0) -public class StaticIndexBackwardCompatibilityIT extends ESIntegTestCase { - - public void loadIndex(String index, Object... 
settings) throws Exception { - logger.info("Checking static index {}", index); - Settings nodeSettings = prepareBackwardsDataDir(getDataPath(index + ".zip"), settings); - internalCluster().startNode(nodeSettings); - ensureGreen(index); - assertIndexSanity(index); - } - - private void assertIndexSanity(String index) { - GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().get(); - assertEquals(1, getIndexResponse.indices().length); - assertEquals(index, getIndexResponse.indices()[0]); - ensureYellow(index); - SearchResponse test = client().prepareSearch(index).get(); - assertThat(test.getHits().getTotalHits(), greaterThanOrEqualTo(1L)); - } - -} diff --git a/core/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java b/core/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java index dbe858982090c..9be0d55d77e6a 100644 --- a/core/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java +++ b/core/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java @@ -40,7 +40,7 @@ import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportResponseHandler; -import org.elasticsearch.transport.TransportServiceAdapter; +import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportStats; import java.io.IOException; @@ -60,7 +60,7 @@ abstract class FailAndRetryMockTransport imp private boolean connectMode = true; - private TransportServiceAdapter transportServiceAdapter; + private TransportService transportService; private final AtomicInteger connectTransportExceptions = new AtomicInteger(); private final AtomicInteger failures = new AtomicInteger(); @@ -90,12 +90,12 @@ public void sendRequest(long requestId, String action, TransportRequest request, //we make sure that nodes get added to the connected ones when calling 
addTransportAddress, by returning proper nodes info if (connectMode) { if (TransportLivenessAction.NAME.equals(action)) { - TransportResponseHandler transportResponseHandler = transportServiceAdapter.onResponseReceived(requestId); + TransportResponseHandler transportResponseHandler = transportService.onResponseReceived(requestId); transportResponseHandler.handleResponse(new LivenessResponse(ClusterName.CLUSTER_NAME_SETTING. getDefault(Settings.EMPTY), node)); } else if (ClusterStateAction.NAME.equals(action)) { - TransportResponseHandler transportResponseHandler = transportServiceAdapter.onResponseReceived(requestId); + TransportResponseHandler transportResponseHandler = transportService.onResponseReceived(requestId); ClusterState clusterState = getMockClusterState(node); transportResponseHandler.handleResponse(new ClusterStateResponse(clusterName, clusterState, 0L)); } else { @@ -116,7 +116,7 @@ public void sendRequest(long requestId, String action, TransportRequest request, //throw whatever exception that is not a subclass of ConnectTransportException throw new IllegalStateException(); } else { - TransportResponseHandler transportResponseHandler = transportServiceAdapter.onResponseReceived(requestId); + TransportResponseHandler transportResponseHandler = transportService.onResponseReceived(requestId); if (random.nextBoolean()) { successes.incrementAndGet(); transportResponseHandler.handleResponse(newResponse()); @@ -163,8 +163,8 @@ public Set triedNodes() { } @Override - public void transportServiceAdapter(TransportServiceAdapter transportServiceAdapter) { - this.transportServiceAdapter = transportServiceAdapter; + public void setTransportService(TransportService transportServiceAdapter) { + this.transportService = transportServiceAdapter; } @Override diff --git a/core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java b/core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java index 
ad24da029e7e0..9ff6ae06d17e7 100644 --- a/core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java +++ b/core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java @@ -61,7 +61,6 @@ import java.util.Map; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; diff --git a/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java index 2e7a857cc7bc9..51908a45380f0 100644 --- a/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java @@ -40,7 +40,6 @@ import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.transport.TransportServiceAdapter; import org.elasticsearch.transport.TransportStats; import org.junit.After; import org.junit.Before; @@ -176,7 +175,7 @@ final class MockTransport implements Transport { volatile boolean randomConnectionExceptions = false; @Override - public void transportServiceAdapter(TransportServiceAdapter service) { + public void setTransportService(TransportService service) { } @Override diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java index 73ff7544ae2c4..b7adc66a55705 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java @@ -69,7 +69,8 @@ public void testReasonOrdinalOrder() { 
UnassignedInfo.Reason.REINITIALIZED, UnassignedInfo.Reason.REALLOCATED_REPLICA, UnassignedInfo.Reason.PRIMARY_FAILED, - UnassignedInfo.Reason.FORCED_EMPTY_PRIMARY}; + UnassignedInfo.Reason.FORCED_EMPTY_PRIMARY, + UnassignedInfo.Reason.MANUAL_ALLOCATION,}; for (int i = 0; i < order.length; i++) { assertThat(order[i].ordinal(), equalTo(i)); } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalancedSingleShardTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalancedSingleShardTests.java index a63447e845b18..405f459e99a39 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalancedSingleShardTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalancedSingleShardTests.java @@ -368,8 +368,7 @@ public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation alloca private RoutingAllocation newRoutingAllocation(AllocationDeciders deciders, ClusterState state) { RoutingAllocation allocation = new RoutingAllocation( - deciders, new RoutingNodes(state, false), state, ClusterInfo.EMPTY, System.nanoTime(), false - ); + deciders, new RoutingNodes(state, false), state, ClusterInfo.EMPTY, System.nanoTime()); allocation.debugDecision(true); return allocation; } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java index 31e2330a600e8..b4ecb6d873d46 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java @@ -113,7 +113,7 @@ public void testSingleRetryOnIgnore() { assertEquals(routingTable.index("idx").shard(0).shards().get(0).state(), UNASSIGNED); assertEquals(routingTable.index("idx").shard(0).shards().get(0).unassignedInfo().getMessage(), 
"boom"); - // manual reroute should retry once + // manual resetting of retry count newState = strategy.reroute(clusterState, new AllocationCommands(), false, true).getClusterState(); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; @@ -121,11 +121,12 @@ public void testSingleRetryOnIgnore() { clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); assertEquals(routingTable.index("idx").shards().size(), 1); - assertEquals(routingTable.index("idx").shard(0).shards().get(0).unassignedInfo().getNumFailedAllocations(), retries); - assertEquals(routingTable.index("idx").shard(0).shards().get(0).state(), INITIALIZING); + assertEquals(0, routingTable.index("idx").shard(0).shards().get(0).unassignedInfo().getNumFailedAllocations()); + assertEquals(INITIALIZING, routingTable.index("idx").shard(0).shards().get(0).state()); assertEquals(routingTable.index("idx").shard(0).shards().get(0).unassignedInfo().getMessage(), "boom"); - // now we go and check that we are actually stick to unassigned on the next failure ie. 
no retry + // again fail it N-1 times + for (int i = 0; i < retries-1; i++) { failedShards = Collections.singletonList( new FailedShard(routingTable.index("idx").shard(0).shards().get(0), "boom", new UnsupportedOperationException())); @@ -135,10 +136,23 @@ public void testSingleRetryOnIgnore() { clusterState = newState; routingTable = newState.routingTable(); assertEquals(routingTable.index("idx").shards().size(), 1); - assertEquals(routingTable.index("idx").shard(0).shards().get(0).unassignedInfo().getNumFailedAllocations(), retries+1); - assertEquals(routingTable.index("idx").shard(0).shards().get(0).state(), UNASSIGNED); + assertEquals(i + 1, routingTable.index("idx").shard(0).shards().get(0).unassignedInfo().getNumFailedAllocations()); + assertEquals(INITIALIZING, routingTable.index("idx").shard(0).shards().get(0).state()); assertEquals(routingTable.index("idx").shard(0).shards().get(0).unassignedInfo().getMessage(), "boom"); + } + // now we go and check that we are actually stick to unassigned on the next failure + failedShards = Collections.singletonList( + new FailedShard(routingTable.index("idx").shard(0).shards().get(0), "boom", + new UnsupportedOperationException())); + newState = strategy.applyFailedShards(clusterState, failedShards); + assertThat(newState, not(equalTo(clusterState))); + clusterState = newState; + routingTable = newState.routingTable(); + assertEquals(routingTable.index("idx").shards().size(), 1); + assertEquals(retries, routingTable.index("idx").shard(0).shards().get(0).unassignedInfo().getNumFailedAllocations()); + assertEquals(UNASSIGNED, routingTable.index("idx").shard(0).shards().get(0).state()); + assertEquals("boom", routingTable.index("idx").shard(0).shards().get(0).unassignedInfo().getMessage()); } public void testFailedAllocation() { @@ -161,7 +175,7 @@ public void testFailedAllocation() { assertEquals(unassignedPrimary.unassignedInfo().getMessage(), "boom" + i); // MaxRetryAllocationDecider#canForceAllocatePrimary should 
return YES decisions because canAllocate returns YES here assertEquals(Decision.YES, new MaxRetryAllocationDecider(Settings.EMPTY).canForceAllocatePrimary( - unassignedPrimary, null, new RoutingAllocation(null, null, clusterState, null, 0, false))); + unassignedPrimary, null, new RoutingAllocation(null, null, clusterState, null, 0))); } // now we go and check that we are actually stick to unassigned on the next failure { @@ -179,7 +193,7 @@ public void testFailedAllocation() { assertEquals(unassignedPrimary.unassignedInfo().getMessage(), "boom"); // MaxRetryAllocationDecider#canForceAllocatePrimary should return a NO decision because canAllocate returns NO here assertEquals(Decision.NO, new MaxRetryAllocationDecider(Settings.EMPTY).canForceAllocatePrimary( - unassignedPrimary, null, new RoutingAllocation(null, null, clusterState, null, 0, false))); + unassignedPrimary, null, new RoutingAllocation(null, null, clusterState, null, 0))); } // change the settings and ensure we can do another round of allocation for that index. 
@@ -201,7 +215,7 @@ public void testFailedAllocation() { assertEquals(unassignedPrimary.unassignedInfo().getMessage(), "boom"); // bumped up the max retry count, so canForceAllocatePrimary should return a YES decision assertEquals(Decision.YES, new MaxRetryAllocationDecider(Settings.EMPTY).canForceAllocatePrimary( - routingTable.index("idx").shard(0).shards().get(0), null, new RoutingAllocation(null, null, clusterState, null, 0, false))); + routingTable.index("idx").shard(0).shards().get(0), null, new RoutingAllocation(null, null, clusterState, null, 0))); // now we start the shard clusterState = strategy.applyStartedShards(clusterState, Collections.singletonList( @@ -228,7 +242,7 @@ public void testFailedAllocation() { assertEquals(unassignedPrimary.unassignedInfo().getMessage(), "ZOOOMG"); // Counter reset, so MaxRetryAllocationDecider#canForceAllocatePrimary should return a YES decision assertEquals(Decision.YES, new MaxRetryAllocationDecider(Settings.EMPTY).canForceAllocatePrimary( - unassignedPrimary, null, new RoutingAllocation(null, null, clusterState, null, 0, false))); + unassignedPrimary, null, new RoutingAllocation(null, null, clusterState, null, 0))); } } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java index 73332fcdce9d0..4b74cee867138 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java @@ -106,8 +106,7 @@ public void testForceAllocatePrimaryOnSameNodeNotAllowed() { ShardRouting primaryShard = clusterState.routingTable().index(index).shard(0).primaryShard(); RoutingNode routingNode = clusterState.getRoutingNodes().node(primaryShard.currentNodeId()); RoutingAllocation routingAllocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, 
Collections.emptyList()), - new RoutingNodes(clusterState, false), clusterState, ClusterInfo.EMPTY, System.nanoTime(), false - ); + new RoutingNodes(clusterState, false), clusterState, ClusterInfo.EMPTY, System.nanoTime()); // can't force allocate same shard copy to the same node ShardRouting newPrimary = TestShardRouting.newShardRouting(primaryShard.shardId(), null, true, ShardRoutingState.UNASSIGNED); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java index 2c7df5fee20de..58d19fb61cf05 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java @@ -841,7 +841,7 @@ public void testCanRemainWithShardRelocatingAway() { ); ClusterState clusterState = ClusterState.builder(baseClusterState).routingTable(builder.build()).build(); RoutingAllocation routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), clusterState, clusterInfo, - System.nanoTime(), false); + System.nanoTime()); routingAllocation.debugDecision(true); Decision decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation); assertThat(decision.type(), equalTo(Decision.Type.NO)); @@ -867,8 +867,7 @@ public void testCanRemainWithShardRelocatingAway() { ) ); clusterState = ClusterState.builder(baseClusterState).routingTable(builder.build()).build(); - routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), clusterState, clusterInfo, System.nanoTime(), - false); + routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), clusterState, clusterInfo, System.nanoTime()); routingAllocation.debugDecision(true); decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, 
routingAllocation); assertThat(decision.type(), equalTo(Decision.Type.YES)); @@ -976,7 +975,7 @@ public void testForSingleDataNode() { ); ClusterState clusterState = ClusterState.builder(baseClusterState).routingTable(builder.build()).build(); RoutingAllocation routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), clusterState, clusterInfo, - System.nanoTime(), false); + System.nanoTime()); routingAllocation.debugDecision(true); Decision decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation); @@ -1036,8 +1035,7 @@ Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLU ); clusterState = ClusterState.builder(updateClusterState).routingTable(builder.build()).build(); - routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), clusterState, clusterInfo, System.nanoTime(), - false); + routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), clusterState, clusterInfo, System.nanoTime()); routingAllocation.debugDecision(true); decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation); assertThat(decision.type(), equalTo(Decision.Type.YES)); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java index 7379ee78d03bd..3676ca8bd6e85 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java @@ -98,7 +98,7 @@ public void testCanAllocateUsesMaxAvailableSpace() { ImmutableOpenMap.Builder shardSizes = ImmutableOpenMap.builder(); shardSizes.put("[test][0][p]", 10L); // 10 bytes final ClusterInfo clusterInfo = new ClusterInfo(leastAvailableUsages.build(), 
mostAvailableUsage.build(), shardSizes.build(), ImmutableOpenMap.of()); - RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, Collections.singleton(decider)), clusterState.getRoutingNodes(), clusterState, clusterInfo, System.nanoTime(), false); + RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, Collections.singleton(decider)), clusterState.getRoutingNodes(), clusterState, clusterInfo, System.nanoTime()); allocation.debugDecision(true); Decision decision = decider.canAllocate(test_0, new RoutingNode("node_0", node_0), allocation); assertEquals(mostAvailableUsage.toString(), Decision.Type.YES, decision.type()); @@ -172,7 +172,7 @@ public void testCanRemainUsesLeastAvailableSpace() { shardSizes.put("[test][2][p]", 10L); final ClusterInfo clusterInfo = new ClusterInfo(leastAvailableUsages.build(), mostAvailableUsage.build(), shardSizes.build(), shardRoutingMap.build()); - RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, Collections.singleton(decider)), clusterState.getRoutingNodes(), clusterState, clusterInfo, System.nanoTime(), false); + RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, Collections.singleton(decider)), clusterState.getRoutingNodes(), clusterState, clusterInfo, System.nanoTime()); allocation.debugDecision(true); Decision decision = decider.canRemain(test_0, new RoutingNode("node_0", node_0), allocation); assertEquals(Decision.Type.YES, decision.type()); @@ -224,7 +224,7 @@ public void testShardSizeAndRelocatingSize() { routingTableBuilder.addAsNew(metaData.index("other")); ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData).routingTable(routingTableBuilder.build()).build(); - RoutingAllocation allocation = new RoutingAllocation(null, null, clusterState, info, 0, false); + 
RoutingAllocation allocation = new RoutingAllocation(null, null, clusterState, info, 0); final Index index = new Index("test", "1234"); ShardRouting test_0 = ShardRouting.newUnassigned(new ShardId(index, 0), false, PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); @@ -305,7 +305,7 @@ public void testSizeShrinkIndex() { clusterState = allocationService.applyStartedShards(clusterState, clusterState.getRoutingTable().index("test").shardsWithState(ShardRoutingState.UNASSIGNED)); - RoutingAllocation allocation = new RoutingAllocation(null, clusterState.getRoutingNodes(), clusterState, info, 0, false); + RoutingAllocation allocation = new RoutingAllocation(null, clusterState.getRoutingNodes(), clusterState, info, 0); final Index index = new Index("test", "1234"); ShardRouting test_0 = ShardRouting.newUnassigned(new ShardId(index, 0), true, diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java index 711e8af13db2a..838da8bf7823d 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java @@ -73,7 +73,7 @@ public void testFilterInitialRecovery() { // after failing the shard we are unassigned since the node is blacklisted and we can't initialize on the other node RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state, - null, 0, false); + null, 0); allocation.debugDecision(true); Decision.Single decision = (Decision.Single) filterAllocationDecider.canAllocate( routingTable.index("idx").shard(0).primaryShard(), @@ -124,7 +124,7 @@ public void testFilterInitialRecovery() { 
assertEquals(routingTable.index("idx").shard(0).primaryShard().currentNodeId(), "node1"); allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state, - null, 0, false); + null, 0); allocation.debugDecision(true); decision = (Decision.Single) filterAllocationDecider.canAllocate( routingTable.index("idx").shard(0).shards().get(0), diff --git a/core/src/test/java/org/elasticsearch/common/cache/CacheTests.java b/core/src/test/java/org/elasticsearch/common/cache/CacheTests.java index 7dbaba02897c6..5675a7b524bd3 100644 --- a/core/src/test/java/org/elasticsearch/common/cache/CacheTests.java +++ b/core/src/test/java/org/elasticsearch/common/cache/CacheTests.java @@ -319,6 +319,29 @@ protected long now() { } } + public void testComputeIfAbsentAfterExpiration() throws ExecutionException { + AtomicLong now = new AtomicLong(); + Cache cache = new Cache() { + @Override + protected long now() { + return now.get(); + } + }; + cache.setExpireAfterAccessNanos(1); + now.set(0); + for (int i = 0; i < numberOfEntries; i++) { + cache.put(i, Integer.toString(i) + "-first"); + } + now.set(2); + for (int i = 0; i < numberOfEntries; i++) { + cache.computeIfAbsent(i, k -> Integer.toString(k) + "-second"); + } + for (int i = 0; i < numberOfEntries; i++) { + assertEquals(i + "-second", cache.get(i)); + } + assertEquals(numberOfEntries, cache.stats().getEvictions()); + } + // randomly promote some entries, step the clock forward, then check that the promoted entries remain and the // non-promoted entries were removed public void testPromotion() { diff --git a/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java index db73e363f4d21..5a5f1c86c5f36 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java @@ -359,6 +359,12 @@ public void set(Integer a, Integer b) { this.a = a; 
this.b = b; } + + public void validate(Integer a, Integer b) { + if (Integer.signum(a) != Integer.signum(b)) { + throw new IllegalArgumentException("boom"); + } + } } @@ -366,7 +372,7 @@ public void testComposite() { Composite c = new Composite(); Setting a = Setting.intSetting("foo.int.bar.a", 1, Property.Dynamic, Property.NodeScope); Setting b = Setting.intSetting("foo.int.bar.b", 1, Property.Dynamic, Property.NodeScope); - ClusterSettings.SettingUpdater> settingUpdater = Setting.compoundUpdater(c::set, a, b, logger); + ClusterSettings.SettingUpdater> settingUpdater = Setting.compoundUpdater(c::set, c::validate, a, b, logger); assertFalse(settingUpdater.apply(Settings.EMPTY, Settings.EMPTY)); assertNull(c.a); assertNull(c.b); @@ -392,6 +398,40 @@ public void testComposite() { } + public void testCompositeValidator() { + Composite c = new Composite(); + Setting a = Setting.intSetting("foo.int.bar.a", 1, Property.Dynamic, Property.NodeScope); + Setting b = Setting.intSetting("foo.int.bar.b", 1, Property.Dynamic, Property.NodeScope); + ClusterSettings.SettingUpdater> settingUpdater = Setting.compoundUpdater(c::set, c::validate, a, b, logger); + assertFalse(settingUpdater.apply(Settings.EMPTY, Settings.EMPTY)); + assertNull(c.a); + assertNull(c.b); + + Settings build = Settings.builder().put("foo.int.bar.a", 2).build(); + assertTrue(settingUpdater.apply(build, Settings.EMPTY)); + assertEquals(2, c.a.intValue()); + assertEquals(1, c.b.intValue()); + + Integer aValue = c.a; + assertFalse(settingUpdater.apply(build, build)); + assertSame(aValue, c.a); + Settings previous = build; + build = Settings.builder().put("foo.int.bar.a", 2).put("foo.int.bar.b", 5).build(); + assertTrue(settingUpdater.apply(build, previous)); + assertEquals(2, c.a.intValue()); + assertEquals(5, c.b.intValue()); + + Settings invalid = Settings.builder().put("foo.int.bar.a", -2).put("foo.int.bar.b", 5).build(); + IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> 
settingUpdater.apply(invalid, previous)); + assertThat(exc.getMessage(), equalTo("boom")); + + // reset to default + assertTrue(settingUpdater.apply(Settings.EMPTY, build)); + assertEquals(1, c.a.intValue()); + assertEquals(1, c.b.intValue()); + + } + public void testListSettings() { Setting> listSetting = Setting.listSetting("foo.bar", Arrays.asList("foo,bar"), (s) -> s.toString(), Property.Dynamic, Property.NodeScope); diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java index 39a9dbff959c6..8c2d84cd8c89d 100644 --- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java @@ -20,6 +20,8 @@ import org.apache.lucene.util.IOUtils; import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterApplier; import org.elasticsearch.cluster.service.MasterService; @@ -40,10 +42,12 @@ import java.io.IOException; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.BiConsumer; import java.util.function.Supplier; import static org.mockito.Mockito.mock; @@ -160,7 +164,23 @@ public void testDuplicateHostsProvider() { public void testLazyConstructionHostsProvider() { DummyHostsProviderPlugin plugin = () -> Collections.singletonMap("custom", - () -> { throw new AssertionError("created hosts provider which was not selected"); }); + () -> { + throw new AssertionError("created hosts provider which was not selected"); + }); newModule(Settings.EMPTY, Collections.singletonList(plugin)); } + + public void testJoinValidator() { + BiConsumer 
consumer = (a, b) -> {}; + DiscoveryModule module = newModule(Settings.EMPTY, Collections.singletonList(new DiscoveryPlugin() { + @Override + public BiConsumer getJoinValidator() { + return consumer; + } + })); + ZenDiscovery discovery = (ZenDiscovery) module.getDiscovery(); + Collection> onJoinValidators = discovery.getOnJoinValidators(); + assertEquals(2, onJoinValidators.size()); + assertTrue(onJoinValidators.contains(consumer)); + } } diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java index bc653e14e3275..b0dc783349ca8 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java @@ -320,7 +320,8 @@ public void onNewClusterState(String source, Supplier clusterState } }; ZenDiscovery zenDiscovery = new ZenDiscovery(settings, threadPool, service, new NamedWriteableRegistry(ClusterModule.getNamedWriteables()), - masterService, clusterApplier, clusterSettings, Collections::emptyList, ESAllocationTestCase.createAllocationService()); + masterService, clusterApplier, clusterSettings, Collections::emptyList, ESAllocationTestCase.createAllocationService(), + Collections.emptyList()); zenDiscovery.start(); return zenDiscovery; } @@ -342,7 +343,10 @@ public void testValidateOnUnsupportedIndexVersionCreated() throws Exception { ClusterState.Builder stateBuilder = ClusterState.builder(ClusterName.DEFAULT); final DiscoveryNode otherNode = new DiscoveryNode("other_node", buildNewFakeTransportAddress(), emptyMap(), EnumSet.allOf(DiscoveryNode.Role.class), Version.CURRENT); - MembershipAction.ValidateJoinRequestRequestHandler request = new MembershipAction.ValidateJoinRequestRequestHandler(); + final DiscoveryNode localNode = new DiscoveryNode("other_node", buildNewFakeTransportAddress(), emptyMap(), + EnumSet.allOf(DiscoveryNode.Role.class), 
Version.CURRENT); + MembershipAction.ValidateJoinRequestRequestHandler request = new MembershipAction.ValidateJoinRequestRequestHandler + (() -> localNode, ZenDiscovery.addBuiltInJoinValidators(Collections.emptyList())); final boolean incompatible = randomBoolean(); IndexMetaData indexMetaData = IndexMetaData.builder("test").settings(Settings.builder() .put(SETTING_VERSION_CREATED, incompatible ? VersionUtils.getPreviousVersion(Version.CURRENT.minimumIndexCompatibilityVersion()) diff --git a/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java b/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java index e91017ecdf913..e3687548190a3 100644 --- a/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java @@ -388,7 +388,7 @@ private RoutingAllocation getRestoreRoutingAllocation(AllocationDeciders allocat .metaData(metaData) .routingTable(routingTable) .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3)).build(); - return new RoutingAllocation(allocationDeciders, new RoutingNodes(state, false), state, null, System.nanoTime(), false); + return new RoutingAllocation(allocationDeciders, new RoutingNodes(state, false), state, null, System.nanoTime()); } private RoutingAllocation routingAllocationWithOnePrimaryNoReplicas(AllocationDeciders deciders, UnassignedInfo.Reason reason, @@ -416,7 +416,7 @@ private RoutingAllocation routingAllocationWithOnePrimaryNoReplicas(AllocationDe .metaData(metaData) .routingTable(routingTableBuilder.build()) .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3)).build(); - return new RoutingAllocation(deciders, new RoutingNodes(state, false), state, null, System.nanoTime(), false); + return new RoutingAllocation(deciders, new RoutingNodes(state, false), state, null, System.nanoTime()); } private void assertClusterHealthStatus(RoutingAllocation allocation, 
ClusterHealthStatus expectedStatus) { diff --git a/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java b/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java index 133c8e3381605..f53c8da2f2d96 100644 --- a/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java @@ -316,7 +316,7 @@ private RoutingAllocation onePrimaryOnNode1And1Replica(AllocationDeciders decide .metaData(metaData) .routingTable(routingTable) .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3)).build(); - return new RoutingAllocation(deciders, new RoutingNodes(state, false), state, ClusterInfo.EMPTY, System.nanoTime(), false); + return new RoutingAllocation(deciders, new RoutingNodes(state, false), state, ClusterInfo.EMPTY, System.nanoTime()); } private RoutingAllocation onePrimaryOnNode1And1ReplicaRecovering(AllocationDeciders deciders) { @@ -338,7 +338,7 @@ private RoutingAllocation onePrimaryOnNode1And1ReplicaRecovering(AllocationDecid .metaData(metaData) .routingTable(routingTable) .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3)).build(); - return new RoutingAllocation(deciders, new RoutingNodes(state, false), state, ClusterInfo.EMPTY, System.nanoTime(), false); + return new RoutingAllocation(deciders, new RoutingNodes(state, false), state, ClusterInfo.EMPTY, System.nanoTime()); } class TestAllocator extends ReplicaShardAllocator { diff --git a/core/src/test/java/org/elasticsearch/index/IndexSettingsTests.java b/core/src/test/java/org/elasticsearch/index/IndexSettingsTests.java index c6fd5d4fbdfd5..dad2b4e7d9153 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexSettingsTests.java +++ b/core/src/test/java/org/elasticsearch/index/IndexSettingsTests.java @@ -290,6 +290,26 @@ public void testMaxResultWindow() { assertEquals(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY).intValue(), 
settings.getMaxResultWindow()); } + public void testMaxInnerResultWindow() { + IndexMetaData metaData = newIndexMeta("index", Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING.getKey(), 200) + .build()); + IndexSettings settings = new IndexSettings(metaData, Settings.EMPTY); + assertEquals(200, settings.getMaxInnerResultWindow()); + settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING.getKey(), + 50).build())); + assertEquals(50, settings.getMaxInnerResultWindow()); + settings.updateIndexMetaData(newIndexMeta("index", Settings.EMPTY)); + assertEquals(IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING.get(Settings.EMPTY).intValue(), settings.getMaxInnerResultWindow()); + + metaData = newIndexMeta("index", Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .build()); + settings = new IndexSettings(metaData, Settings.EMPTY); + assertEquals(IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING.get(Settings.EMPTY).intValue(), settings.getMaxInnerResultWindow()); + } + public void testMaxAdjacencyMatrixFiltersSetting() { IndexMetaData metaData = newIndexMeta("index", Settings.builder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index f0d9ffe99e4fc..c75926bbc0171 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -346,7 +346,7 @@ protected Translog createTranslog() throws IOException { protected Translog createTranslog(Path translogPath) throws IOException { TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE); - return new 
Translog(translogConfig, null, createTranslogDeletionPolicy(INDEX_SETTINGS), () -> SequenceNumbersService.UNASSIGNED_SEQ_NO); + return new Translog(translogConfig, null, createTranslogDeletionPolicy(INDEX_SETTINGS), () -> SequenceNumbers.UNASSIGNED_SEQ_NO); } protected InternalEngine createEngine(Store store, Path translogPath) throws IOException { @@ -721,9 +721,9 @@ public void testSegmentsStatsIncludingFileSizes() throws Exception { } public void testCommitStats() throws IOException { - final AtomicLong maxSeqNo = new AtomicLong(SequenceNumbersService.NO_OPS_PERFORMED); - final AtomicLong localCheckpoint = new AtomicLong(SequenceNumbersService.NO_OPS_PERFORMED); - final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbersService.UNASSIGNED_SEQ_NO); + final AtomicLong maxSeqNo = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + final AtomicLong localCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO); try ( Store store = createStore(); InternalEngine engine = createEngine(store, createTempDir(), (config) -> new SequenceNumbersService( @@ -740,19 +740,19 @@ public void testCommitStats() throws IOException { assertThat(stats1.getUserData(), hasKey(SequenceNumbers.LOCAL_CHECKPOINT_KEY)); assertThat( Long.parseLong(stats1.getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)), - equalTo(SequenceNumbersService.NO_OPS_PERFORMED)); + equalTo(SequenceNumbers.NO_OPS_PERFORMED)); assertThat(stats1.getUserData(), hasKey(SequenceNumbers.MAX_SEQ_NO)); assertThat( Long.parseLong(stats1.getUserData().get(SequenceNumbers.MAX_SEQ_NO)), - equalTo(SequenceNumbersService.NO_OPS_PERFORMED)); + equalTo(SequenceNumbers.NO_OPS_PERFORMED)); - maxSeqNo.set(rarely() ? SequenceNumbersService.NO_OPS_PERFORMED : randomIntBetween(0, 1024)); + maxSeqNo.set(rarely() ? 
SequenceNumbers.NO_OPS_PERFORMED : randomIntBetween(0, 1024)); localCheckpoint.set( - rarely() || maxSeqNo.get() == SequenceNumbersService.NO_OPS_PERFORMED ? - SequenceNumbersService.NO_OPS_PERFORMED : randomIntBetween(0, 1024)); - globalCheckpoint.set(rarely() || localCheckpoint.get() == SequenceNumbersService.NO_OPS_PERFORMED ? - SequenceNumbersService.UNASSIGNED_SEQ_NO : randomIntBetween(0, (int) localCheckpoint.get())); + rarely() || maxSeqNo.get() == SequenceNumbers.NO_OPS_PERFORMED ? + SequenceNumbers.NO_OPS_PERFORMED : randomIntBetween(0, 1024)); + globalCheckpoint.set(rarely() || localCheckpoint.get() == SequenceNumbers.NO_OPS_PERFORMED ? + SequenceNumbers.UNASSIGNED_SEQ_NO : randomIntBetween(0, (int) localCheckpoint.get())); engine.flush(true, true); @@ -827,11 +827,11 @@ public void testTranslogMultipleOperationsSameDocument() throws IOException { for (int i = 0; i < ops; i++) { final ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), SOURCE, null); if (randomBoolean()) { - final Engine.Index operation = new Engine.Index(newUid(doc), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, i, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false); + final Engine.Index operation = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, i, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false); operations.add(operation); initialEngine.index(operation); } else { - final Engine.Delete operation = new Engine.Delete("test", "1", newUid(doc), SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, i, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime()); + final Engine.Delete operation = new Engine.Delete("test", "1", newUid(doc), SequenceNumbers.UNASSIGNED_SEQ_NO, 0, i, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime()); operations.add(operation); initialEngine.delete(operation); } @@ -902,9 +902,9 @@ public void 
testTranslogRecoveryWithMultipleGenerations() throws IOException { new SequenceNumbersService( config.getShardId(), config.getIndexSettings(), - SequenceNumbersService.NO_OPS_PERFORMED, - SequenceNumbersService.NO_OPS_PERFORMED, - SequenceNumbersService.UNASSIGNED_SEQ_NO) { + SequenceNumbers.NO_OPS_PERFORMED, + SequenceNumbers.NO_OPS_PERFORMED, + SequenceNumbers.UNASSIGNED_SEQ_NO) { @Override public long generateSeqNo() { return seqNos.get(counter.getAndIncrement()); @@ -1215,7 +1215,7 @@ public void testRenewSyncFlush() throws Exception { final boolean forceMergeFlushes = randomBoolean(); final ParsedDocument parsedDoc3 = testParsedDocument("3", null, testDocumentWithTextField(), B_1, null); if (forceMergeFlushes) { - engine.index(new Engine.Index(newUid(parsedDoc3), parsedDoc3, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime() - engine.engineConfig.getFlushMergesAfter().nanos(), -1, false)); + engine.index(new Engine.Index(newUid(parsedDoc3), parsedDoc3, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime() - engine.engineConfig.getFlushMergesAfter().nanos(), -1, false)); } else { engine.index(indexForDoc(parsedDoc3)); } @@ -1417,11 +1417,11 @@ public void run() { public void testVersioningCreateExistsException() throws IOException { ParsedDocument doc = testParsedDocument("1", null, testDocument(), B_1, null); - Engine.Index create = new Engine.Index(newUid(doc), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0, -1, false); + Engine.Index create = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0, -1, false); Engine.IndexResult indexResult = engine.index(create); assertThat(indexResult.getVersion(), equalTo(1L)); - create = new Engine.Index(newUid(doc), doc, 
SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0, -1, false); + create = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0, -1, false); indexResult = engine.index(create); assertTrue(indexResult.hasFailure()); assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class)); @@ -1462,7 +1462,7 @@ protected List generateSingleDocHistory(boolean forReplica, Ve } if (randomBoolean()) { op = new Engine.Index(id, testParsedDocument("1", null, testDocumentWithTextField(valuePrefix + i), B_1, null), - forReplica && i >= startWithSeqNo ? i * 2 : SequenceNumbersService.UNASSIGNED_SEQ_NO, + forReplica && i >= startWithSeqNo ? i * 2 : SequenceNumbers.UNASSIGNED_SEQ_NO, forReplica && i >= startWithSeqNo && incrementTermWhenIntroducingSeqNo ? primaryTerm + 1 : primaryTerm, version, forReplica ? versionType.versionTypeForReplicationAndRecovery() : versionType, @@ -1471,7 +1471,7 @@ protected List generateSingleDocHistory(boolean forReplica, Ve ); } else { op = new Engine.Delete("test", "1", id, - forReplica && i >= startWithSeqNo ? i * 2 : SequenceNumbersService.UNASSIGNED_SEQ_NO, + forReplica && i >= startWithSeqNo ? i * 2 : SequenceNumbers.UNASSIGNED_SEQ_NO, forReplica && i >= startWithSeqNo && incrementTermWhenIntroducingSeqNo ? primaryTerm + 1 : primaryTerm, version, forReplica ? 
versionType.versionTypeForReplicationAndRecovery() : versionType, @@ -1900,7 +1900,7 @@ class OpAndVersion { Engine.Index index = new Engine.Index(uidTerm, testParsedDocument("1", null, testDocument(), bytesArray(Strings.collectionToCommaDelimitedString(values)), null), - SequenceNumbersService.UNASSIGNED_SEQ_NO, 2, + SequenceNumbers.UNASSIGNED_SEQ_NO, 2, get.version(), VersionType.INTERNAL, PRIMARY, System.currentTimeMillis(), -1, false); Engine.IndexResult indexResult = engine.index(index); @@ -2017,13 +2017,13 @@ public void testIndexWriterInfoStream() throws IllegalAccessException, IOExcepti public void testSeqNoAndCheckpoints() throws IOException { final int opCount = randomIntBetween(1, 256); - long primarySeqNo = SequenceNumbersService.NO_OPS_PERFORMED; + long primarySeqNo = SequenceNumbers.NO_OPS_PERFORMED; final String[] ids = new String[]{"1", "2", "3"}; final Set indexedIds = new HashSet<>(); - long localCheckpoint = SequenceNumbersService.NO_OPS_PERFORMED; - long replicaLocalCheckpoint = SequenceNumbersService.NO_OPS_PERFORMED; - long globalCheckpoint = SequenceNumbersService.UNASSIGNED_SEQ_NO; - long maxSeqNo = SequenceNumbersService.NO_OPS_PERFORMED; + long localCheckpoint = SequenceNumbers.NO_OPS_PERFORMED; + long replicaLocalCheckpoint = SequenceNumbers.NO_OPS_PERFORMED; + long globalCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; + long maxSeqNo = SequenceNumbers.NO_OPS_PERFORMED; InternalEngine initialEngine = null; try { @@ -2041,7 +2041,7 @@ public void testSeqNoAndCheckpoints() throws IOException { // we have some docs indexed, so delete one of them id = randomFrom(indexedIds); final Engine.Delete delete = new Engine.Delete( - "test", id, newUid(id), SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, + "test", id, newUid(id), SequenceNumbers.UNASSIGNED_SEQ_NO, 0, rarely() ? 
100 : Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, 0); final Engine.DeleteResult result = initialEngine.delete(delete); if (!result.hasFailure()) { @@ -2050,7 +2050,7 @@ public void testSeqNoAndCheckpoints() throws IOException { indexedIds.remove(id); primarySeqNo++; } else { - assertThat(result.getSeqNo(), equalTo(SequenceNumbersService.UNASSIGNED_SEQ_NO)); + assertThat(result.getSeqNo(), equalTo(SequenceNumbers.UNASSIGNED_SEQ_NO)); assertThat(initialEngine.seqNoService().getMaxSeqNo(), equalTo(primarySeqNo)); } } else { @@ -2058,7 +2058,7 @@ public void testSeqNoAndCheckpoints() throws IOException { id = randomFrom(ids); ParsedDocument doc = testParsedDocument(id, null, testDocumentWithTextField(), SOURCE, null); final Engine.Index index = new Engine.Index(newUid(doc), doc, - SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, + SequenceNumbers.UNASSIGNED_SEQ_NO, 0, rarely() ? 100 : Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, 0, -1, false); final Engine.IndexResult result = initialEngine.index(index); @@ -2068,7 +2068,7 @@ public void testSeqNoAndCheckpoints() throws IOException { indexedIds.add(id); primarySeqNo++; } else { - assertThat(result.getSeqNo(), equalTo(SequenceNumbersService.UNASSIGNED_SEQ_NO)); + assertThat(result.getSeqNo(), equalTo(SequenceNumbers.UNASSIGNED_SEQ_NO)); assertThat(initialEngine.seqNoService().getMaxSeqNo(), equalTo(primarySeqNo)); } } @@ -2192,17 +2192,17 @@ public void testConcurrentWritesAndCommits() throws Exception { } while (doneIndexing == false); // now, verify all the commits have the correct docs according to the user commit data - long prevLocalCheckpoint = SequenceNumbersService.NO_OPS_PERFORMED; - long prevMaxSeqNo = SequenceNumbersService.NO_OPS_PERFORMED; + long prevLocalCheckpoint = SequenceNumbers.NO_OPS_PERFORMED; + long prevMaxSeqNo = SequenceNumbers.NO_OPS_PERFORMED; for (Engine.IndexCommitRef commitRef : commits) { final IndexCommit commit = commitRef.getIndexCommit(); Map userData = commit.getUserData(); 
long localCheckpoint = userData.containsKey(SequenceNumbers.LOCAL_CHECKPOINT_KEY) ? Long.parseLong(userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)) : - SequenceNumbersService.NO_OPS_PERFORMED; + SequenceNumbers.NO_OPS_PERFORMED; long maxSeqNo = userData.containsKey(SequenceNumbers.MAX_SEQ_NO) ? Long.parseLong(userData.get(SequenceNumbers.MAX_SEQ_NO)) : - SequenceNumbersService.UNASSIGNED_SEQ_NO; + SequenceNumbers.UNASSIGNED_SEQ_NO; // local checkpoint and max seq no shouldn't go backwards assertThat(localCheckpoint, greaterThanOrEqualTo(prevLocalCheckpoint)); assertThat(maxSeqNo, greaterThanOrEqualTo(prevMaxSeqNo)); @@ -2212,7 +2212,7 @@ public void testConcurrentWritesAndCommits() throws Exception { if (highest != null) { highestSeqNo = highest.longValue(); } else { - highestSeqNo = SequenceNumbersService.NO_OPS_PERFORMED; + highestSeqNo = SequenceNumbers.NO_OPS_PERFORMED; } // make sure localCheckpoint <= highest seq no found <= maxSeqNo assertThat(highestSeqNo, greaterThanOrEqualTo(localCheckpoint)); @@ -2317,10 +2317,10 @@ public void testEnableGcDeletes() throws Exception { document.add(new TextField("value", "test1", Field.Store.YES)); ParsedDocument doc = testParsedDocument("1", null, document, B_2, null); - engine.index(new Engine.Index(newUid(doc), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, 1, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false)); + engine.index(new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, 1, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false)); // Delete document we just added: - engine.delete(new Engine.Delete("test", "1", newUid(doc), SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, 10, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime())); + engine.delete(new Engine.Delete("test", "1", newUid(doc), SequenceNumbers.UNASSIGNED_SEQ_NO, 0, 10, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime())); // 
Get should not find the document Engine.GetResult getResult = engine.get(newGet(true, doc), searcherFactory); @@ -2334,14 +2334,14 @@ public void testEnableGcDeletes() throws Exception { } // Delete non-existent document - engine.delete(new Engine.Delete("test", "2", newUid("2"), SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, 10, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime())); + engine.delete(new Engine.Delete("test", "2", newUid("2"), SequenceNumbers.UNASSIGNED_SEQ_NO, 0, 10, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime())); // Get should not find the document (we never indexed uid=2): getResult = engine.get(new Engine.Get(true, "type", "2", newUid("2")), searcherFactory); assertThat(getResult.exists(), equalTo(false)); // Try to index uid=1 with a too-old version, should fail: - Engine.Index index = new Engine.Index(newUid(doc), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, 2, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false); + Engine.Index index = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, 2, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false); Engine.IndexResult indexResult = engine.index(index); assertTrue(indexResult.hasFailure()); assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class)); @@ -2351,7 +2351,7 @@ public void testEnableGcDeletes() throws Exception { assertThat(getResult.exists(), equalTo(false)); // Try to index uid=2 with a too-old version, should fail: - Engine.Index index1 = new Engine.Index(newUid(doc), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, 2, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false); + Engine.Index index1 = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, 2, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false); indexResult = engine.index(index1); 
assertTrue(indexResult.hasFailure()); assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class)); @@ -2469,7 +2469,7 @@ public void testTranslogReplayWithFailure() throws IOException { final int numDocs = randomIntBetween(1, 10); for (int i = 0; i < numDocs; i++) { ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null); - Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); + Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); Engine.IndexResult indexResult = engine.index(firstIndexRequest); assertThat(indexResult.getVersion(), equalTo(1L)); } @@ -2568,7 +2568,7 @@ public void testSkipTranslogReplay() throws IOException { final int numDocs = randomIntBetween(1, 10); for (int i = 0; i < numDocs; i++) { ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null); - Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); + Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); Engine.IndexResult indexResult = engine.index(firstIndexRequest); assertThat(indexResult.getVersion(), equalTo(1L)); } @@ -2603,7 +2603,7 @@ public void testTranslogReplay() throws IOException { final int numDocs = randomIntBetween(1, 10); for (int i = 0; i < numDocs; i++) { ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null); - Engine.Index 
firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); + Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); Engine.IndexResult indexResult = engine.index(firstIndexRequest); assertThat(indexResult.getVersion(), equalTo(1L)); } @@ -2635,7 +2635,7 @@ public void testTranslogReplay() throws IOException { final boolean flush = randomBoolean(); int randomId = randomIntBetween(numDocs + 1, numDocs + 10); ParsedDocument doc = testParsedDocument(Integer.toString(randomId), null, testDocument(), new BytesArray("{}"), null); - Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, 1, VersionType.EXTERNAL, PRIMARY, System.nanoTime(), -1, false); + Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, 1, VersionType.EXTERNAL, PRIMARY, System.nanoTime(), -1, false); Engine.IndexResult indexResult = engine.index(firstIndexRequest); assertThat(indexResult.getVersion(), equalTo(1L)); if (flush) { @@ -2643,7 +2643,7 @@ public void testTranslogReplay() throws IOException { } doc = testParsedDocument(Integer.toString(randomId), null, testDocument(), new BytesArray("{}"), null); - Engine.Index idxRequest = new Engine.Index(newUid(doc), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, 2, VersionType.EXTERNAL, PRIMARY, System.nanoTime(), -1, false); + Engine.Index idxRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, 2, VersionType.EXTERNAL, PRIMARY, System.nanoTime(), -1, false); Engine.IndexResult result = engine.index(idxRequest); engine.refresh("test"); assertThat(result.getVersion(), equalTo(2L)); @@ -2768,7 +2768,7 @@ public void testRecoverFromForeignTranslog() throws 
IOException { final int numDocs = randomIntBetween(1, 10); for (int i = 0; i < numDocs; i++) { ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null); - Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); + Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); Engine.IndexResult index = engine.index(firstIndexRequest); assertThat(index.getVersion(), equalTo(1L)); } @@ -2778,7 +2778,7 @@ public void testRecoverFromForeignTranslog() throws IOException { Translog translog = new Translog( new TranslogConfig(shardId, createTempDir(), INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), - null, createTranslogDeletionPolicy(INDEX_SETTINGS), () -> SequenceNumbersService.UNASSIGNED_SEQ_NO); + null, createTranslogDeletionPolicy(INDEX_SETTINGS), () -> SequenceNumbers.UNASSIGNED_SEQ_NO); translog.add(new Translog.Index("test", "SomeBogusId", 0, "{}".getBytes(Charset.forName("UTF-8")))); assertEquals(generation.translogFileGeneration, translog.currentFileGeneration()); translog.close(); @@ -2904,7 +2904,7 @@ public void testCurrentTranslogIDisCommitted() throws IOException { // create { ParsedDocument doc = testParsedDocument(Integer.toString(0), null, testDocument(), new BytesArray("{}"), null); - Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); + Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); try (InternalEngine engine = new InternalEngine(copy(config, 
EngineConfig.OpenMode.CREATE_INDEX_AND_TRANSLOG))){ assertFalse(engine.isRecovering()); @@ -3254,7 +3254,7 @@ public void testRetryWithAutogeneratedIdWorksAndNoDuplicateDocs() throws IOExcep boolean isRetry = false; long autoGeneratedIdTimestamp = 0; - Engine.Index index = new Engine.Index(newUid(doc), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); + Engine.Index index = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); Engine.IndexResult indexResult = engine.index(index); assertThat(indexResult.getVersion(), equalTo(1L)); @@ -3263,7 +3263,7 @@ public void testRetryWithAutogeneratedIdWorksAndNoDuplicateDocs() throws IOExcep assertThat(indexResult.getVersion(), equalTo(1L)); isRetry = true; - index = new Engine.Index(newUid(doc), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); + index = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); indexResult = engine.index(index); assertThat(indexResult.getVersion(), equalTo(1L)); engine.refresh("test"); @@ -3288,7 +3288,7 @@ public void testRetryWithAutogeneratedIdsAndWrongOrderWorksAndNoDuplicateDocs() boolean isRetry = true; long autoGeneratedIdTimestamp = 0; - Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); + Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), 
autoGeneratedIdTimestamp, isRetry); Engine.IndexResult result = engine.index(firstIndexRequest); assertThat(result.getVersion(), equalTo(1L)); @@ -3297,7 +3297,7 @@ public void testRetryWithAutogeneratedIdsAndWrongOrderWorksAndNoDuplicateDocs() assertThat(indexReplicaResult.getVersion(), equalTo(1L)); isRetry = false; - Engine.Index secondIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); + Engine.Index secondIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); Engine.IndexResult indexResult = engine.index(secondIndexRequest); assertTrue(indexResult.isCreated()); engine.refresh("test"); @@ -3324,7 +3324,7 @@ public Engine.Index randomAppendOnly(ParsedDocument doc, boolean retry, final lo } public Engine.Index appendOnlyPrimary(ParsedDocument doc, boolean retry, final long autoGeneratedIdTimestamp) { - return new Engine.Index(newUid(doc), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, + return new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, retry); } @@ -3561,7 +3561,7 @@ public void afterRefresh(boolean didRefresh) throws IOException { public void testSequenceIDs() throws Exception { Tuple seqID = getSequenceID(engine, new Engine.Get(false, "type", "2", newUid("1"))); // Non-existent doc returns no seqnum and no primary term - assertThat(seqID.v1(), equalTo(SequenceNumbersService.UNASSIGNED_SEQ_NO)); + assertThat(seqID.v1(), equalTo(SequenceNumbers.UNASSIGNED_SEQ_NO)); assertThat(seqID.v2(), equalTo(0L)); // create a document @@ -3592,7 +3592,7 @@ public void testSequenceIDs() throws Exception { document = 
testDocumentWithTextField(); document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_1), SourceFieldMapper.Defaults.FIELD_TYPE)); doc = testParsedDocument("1", null, document, B_1, null); - engine.index(new Engine.Index(newUid(doc), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 3, + engine.index(new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 3, Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false)); engine.refresh("test"); @@ -3629,9 +3629,9 @@ private SequenceNumbersService getStallingSeqNoService( return new SequenceNumbersService( shardId, defaultSettings, - SequenceNumbersService.NO_OPS_PERFORMED, - SequenceNumbersService.NO_OPS_PERFORMED, - SequenceNumbersService.UNASSIGNED_SEQ_NO) { + SequenceNumbers.NO_OPS_PERFORMED, + SequenceNumbers.NO_OPS_PERFORMED, + SequenceNumbers.UNASSIGNED_SEQ_NO) { @Override public long generateSeqNo() { final long seqNo = super.generateSeqNo(); @@ -3661,7 +3661,7 @@ public void testSequenceNumberAdvancesToMaxSeqOnEngineOpenOnPrimary() throws Bro final AtomicReference latchReference = new AtomicReference<>(new CountDownLatch(1)); final CyclicBarrier barrier = new CyclicBarrier(2); final AtomicBoolean stall = new AtomicBoolean(); - final AtomicLong expectedLocalCheckpoint = new AtomicLong(SequenceNumbersService.NO_OPS_PERFORMED); + final AtomicLong expectedLocalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); final List threads = new ArrayList<>(); final SequenceNumbersService seqNoService = getStallingSeqNoService(latchReference, barrier, stall, expectedLocalCheckpoint); initialEngine = createEngine(defaultSettings, store, primaryTranslogDir, newMergePolicy(), null, (config) -> seqNoService); @@ -3756,7 +3756,7 @@ public void testOutOfOrderSequenceNumbersWithVersionConflict() throws IOExceptio final AtomicLong sequenceNumber = new AtomicLong(); final Engine.Operation.Origin origin = randomFrom(LOCAL_TRANSLOG_RECOVERY, 
PEER_RECOVERY, PRIMARY, REPLICA); final LongSupplier sequenceNumberSupplier = - origin == PRIMARY ? () -> SequenceNumbersService.UNASSIGNED_SEQ_NO : sequenceNumber::getAndIncrement; + origin == PRIMARY ? () -> SequenceNumbers.UNASSIGNED_SEQ_NO : sequenceNumber::getAndIncrement; document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_1), SourceFieldMapper.Defaults.FIELD_TYPE)); final ParsedDocument doc = testParsedDocument("1", null, document, B_1, null); final Term uid = newUid(doc); @@ -3891,7 +3891,7 @@ public void testMinGenerationForSeqNo() throws IOException, BrokenBarrierExcepti final AtomicReference latchReference = new AtomicReference<>(); final CyclicBarrier barrier = new CyclicBarrier(2); final AtomicBoolean stall = new AtomicBoolean(); - final AtomicLong expectedLocalCheckpoint = new AtomicLong(SequenceNumbersService.NO_OPS_PERFORMED); + final AtomicLong expectedLocalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); final Map threads = new LinkedHashMap<>(); final SequenceNumbersService seqNoService = getStallingSeqNoService(latchReference, barrier, stall, expectedLocalCheckpoint); actualEngine = createEngine(defaultSettings, store, primaryTranslogDir, newMergePolicy(), null, (config) -> seqNoService); @@ -3967,7 +3967,7 @@ private Tuple getSequenceID(Engine engine, Engine.Get get) throws En DocIdAndSeqNo docIdAndSeqNo = VersionsAndSeqNoResolver.loadDocIdAndSeqNo(searcher.reader(), get.uid()); if (docIdAndSeqNo == null) { primaryTerm = 0; - seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO; + seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; } else { seqNo = docIdAndSeqNo.seqNo; primaryTerm = VersionsAndSeqNoResolver.loadPrimaryTerm(docIdAndSeqNo, get.uid().field()); @@ -3987,9 +3987,9 @@ public void testRestoreLocalCheckpointFromTranslog() throws IOException { new SequenceNumbersService( shardId, defaultSettings, - SequenceNumbersService.NO_OPS_PERFORMED, - SequenceNumbersService.NO_OPS_PERFORMED, - 
SequenceNumbersService.UNASSIGNED_SEQ_NO) { + SequenceNumbers.NO_OPS_PERFORMED, + SequenceNumbers.NO_OPS_PERFORMED, + SequenceNumbers.UNASSIGNED_SEQ_NO) { @Override public void markSeqNoAsCompleted(long seqNo) { super.markSeqNoAsCompleted(seqNo); @@ -4025,7 +4025,7 @@ public SequenceNumbersService seqNoService() { } final long currentLocalCheckpoint = actualEngine.seqNoService().getLocalCheckpoint(); final long resetLocalCheckpoint = - randomIntBetween(Math.toIntExact(SequenceNumbersService.NO_OPS_PERFORMED), Math.toIntExact(currentLocalCheckpoint)); + randomIntBetween(Math.toIntExact(SequenceNumbers.NO_OPS_PERFORMED), Math.toIntExact(currentLocalCheckpoint)); actualEngine.seqNoService().resetLocalCheckpoint(resetLocalCheckpoint); completedSeqNos.clear(); actualEngine.restoreLocalCheckpointFromTranslog(); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java index 425d2d8d4ded2..43136b67e8ccf 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java @@ -200,11 +200,11 @@ public void testRangeQuery() throws IOException { LongPoint.newRangeQuery("field", instant1, instant2), SortedNumericDocValuesField.newSlowRangeQuery("field", instant1, instant2)); assertEquals(expected, - ft.rangeQuery(date1, date2, true, true, context).rewrite(new MultiReader())); + ft.rangeQuery(date1, date2, true, true, null, null, null, context).rewrite(new MultiReader())); ft.setIndexOptions(IndexOptions.NONE); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> ft.rangeQuery(date1, date2, true, true, context)); + () -> ft.rangeQuery(date1, date2, true, true, null, null, null, context)); assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); } } diff --git 
a/core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java index 4d83cc998462c..cbf890ef47687 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.mapper; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.bytes.BytesArray; @@ -1375,4 +1376,26 @@ public void testDynamicFieldsStartingAndEndingWithDot() throws Exception { containsString("object field starting or ending with a [.] makes object resolution ambiguous: [top..foo..bar]")); } } + + public void testBlankFieldNames() throws Exception { + final BytesReference bytes = XContentFactory.jsonBuilder() + .startObject() + .field("", "foo") + .endObject().bytes(); + + MapperParsingException err = expectThrows(MapperParsingException.class, () -> + client().prepareIndex("idx", "type").setSource(bytes, XContentType.JSON).get()); + assertThat(ExceptionsHelper.detailedMessage(err), containsString("field name cannot be an empty string")); + + final BytesReference bytes2 = XContentFactory.jsonBuilder() + .startObject() + .startObject("foo") + .field("", "bar") + .endObject() + .endObject().bytes(); + + err = expectThrows(MapperParsingException.class, () -> + client().prepareIndex("idx", "type").setSource(bytes2, XContentType.JSON).get()); + assertThat(ExceptionsHelper.detailedMessage(err), containsString("field name cannot be an empty string")); + } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingDisabledTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingDisabledTests.java deleted file mode 100644 index 686bbafbcd23a..0000000000000 --- 
a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingDisabledTests.java +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.mapper; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.bulk.BulkItemResponse; -import org.elasticsearch.action.bulk.BulkRequest; -import org.elasticsearch.action.bulk.BulkResponse; -import org.elasticsearch.action.bulk.TransportBulkAction; -import org.elasticsearch.action.bulk.TransportShardBulkAction; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.AutoCreateIndex; -import org.elasticsearch.action.update.UpdateHelper; -import org.elasticsearch.client.Requests; -import org.elasticsearch.cluster.action.shard.ShardStateAction; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.network.NetworkService; -import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.Settings; -import 
org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.index.IndexNotFoundException; -import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; -import org.elasticsearch.test.ESSingleNodeTestCase; -import org.elasticsearch.threadpool.TestThreadPool; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.MockTcpTransport; -import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportService; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.BeforeClass; - -import java.util.Collections; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; - -import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; -import static org.hamcrest.CoreMatchers.instanceOf; - -public class DynamicMappingDisabledTests extends ESSingleNodeTestCase { - - private static ThreadPool threadPool; - private ClusterService clusterService; - private TransportService transportService; - private TransportBulkAction transportBulkAction; - - @BeforeClass - public static void createThreadPool() { - threadPool = new TestThreadPool("DynamicMappingDisabledTests"); - } - - @Override - public void setUp() throws Exception { - super.setUp(); - Settings settings = Settings.builder() - .put(MapperService.INDEX_MAPPER_DYNAMIC_SETTING.getKey(), false) - .build(); - clusterService = createClusterService(threadPool); - Transport transport = new MockTcpTransport(settings, threadPool, BigArrays.NON_RECYCLING_INSTANCE, - new NoneCircuitBreakerService(), new NamedWriteableRegistry(Collections.emptyList()), - new NetworkService(Collections.emptyList())); - transportService = new TransportService(clusterService.getSettings(), transport, threadPool, - TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - 
ShardStateAction shardStateAction = new ShardStateAction(settings, clusterService, transportService, null, null, threadPool); - ActionFilters actionFilters = new ActionFilters(Collections.emptySet()); - IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(settings); - AutoCreateIndex autoCreateIndex = new AutoCreateIndex(settings, new ClusterSettings(settings, - ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), indexNameExpressionResolver); - UpdateHelper updateHelper = new UpdateHelper(settings, null); - TransportShardBulkAction shardBulkAction = new TransportShardBulkAction(settings, transportService, clusterService, - indicesService, threadPool, shardStateAction, null, updateHelper, actionFilters, indexNameExpressionResolver); - transportBulkAction = new TransportBulkAction(settings, threadPool, transportService, clusterService, - null, shardBulkAction, null, actionFilters, indexNameExpressionResolver, autoCreateIndex, System::currentTimeMillis); - } - - @After - public void tearDown() throws Exception { - super.tearDown(); - clusterService.close(); - transportService.close(); - } - - - @AfterClass - public static void destroyThreadPool() { - ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); - // since static must set to null to be eligible for collection - threadPool = null; - } - - public void testDynamicDisabled() { - IndexRequest request = new IndexRequest("index", "type", "1"); - request.source(Requests.INDEX_CONTENT_TYPE, "foo", 3); - BulkRequest bulkRequest = new BulkRequest(); - bulkRequest.add(request); - final AtomicBoolean gotResponse = new AtomicBoolean(); - - transportBulkAction.execute(bulkRequest, new ActionListener() { - @Override - public void onResponse(BulkResponse bulkResponse) { - BulkItemResponse itemResponse = bulkResponse.getItems()[0]; - assertTrue(itemResponse.isFailed()); - assertThat(itemResponse.getFailure().getCause(), instanceOf(IndexNotFoundException.class)); - assertEquals("no such index and 
[index.mapper.dynamic] is [false]", itemResponse.getFailure().getCause().getMessage()); - gotResponse.set(true); - } - - @Override - public void onFailure(Exception e) { - fail("unexpected failure in bulk action, expected failed bulk item"); - } - }); - - assertTrue(gotResponse.get()); - } -} diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIT.java b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIT.java index d183242ee19fe..4172a6172005e 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIT.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIT.java @@ -153,34 +153,4 @@ public void run() { assertTrue(client().prepareGet("index", "type", Integer.toString(i)).get().isExists()); } } - - public void testAutoCreateWithDisabledDynamicMappings() throws Exception { - assertAcked(client().admin().indices().preparePutTemplate("my_template") - .setCreate(true) - .setPatterns(Collections.singletonList("index_*")) - .addMapping("foo", "field", "type=keyword") - .setSettings(Settings.builder().put("index.mapper.dynamic", false).build()) - .get()); - - // succeeds since 'foo' has an explicit mapping in the template - indexRandom(true, false, client().prepareIndex("index_1", "foo", "1").setSource("field", "abc")); - - // fails since 'bar' does not have an explicit mapping in the template and dynamic template creation is disabled - TypeMissingException e1 = expectThrows(TypeMissingException.class, - () -> client().prepareIndex("index_2", "bar", "1").setSource("field", "abc").get()); - assertEquals("type[bar] missing", e1.getMessage()); - assertEquals("trying to auto create mapping, but dynamic mapping is disabled", e1.getCause().getMessage()); - - BulkResponse bulkResponse = client().prepareBulk().add(new IndexRequest("index_2", "bar", "2").source("field", "abc")).get(); - assertTrue(bulkResponse.hasFailures()); - BulkItemResponse.Failure firstFailure = 
bulkResponse.getItems()[0].getFailure(); - assertThat(firstFailure.getCause(), instanceOf(TypeMissingException.class)); - assertEquals("type[bar] missing", firstFailure.getCause().getMessage()); - assertEquals("trying to auto create mapping, but dynamic mapping is disabled", firstFailure.getCause().getCause().getMessage()); - - // make sure no mappings were created for bar - GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().addIndices("index_2").get(); - assertFalse(getIndexResponse.mappings().containsKey("bar")); - } - } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingVersionTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingVersionTests.java new file mode 100644 index 0000000000000..94af6c5454493 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingVersionTests.java @@ -0,0 +1,80 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.mapper; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.indices.TypeMissingException; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; + +import java.io.IOException; +import java.util.Collection; + +public class DynamicMappingVersionTests extends ESSingleNodeTestCase { + + @Override + protected Collection> getPlugins() { + return pluginList(InternalSettingsPlugin.class); + } + + public void testDynamicMappingDefault() throws IOException { + MapperService mapperService = createIndex("my-index").mapperService(); + DocumentMapper documentMapper = mapperService + .documentMapperWithAutoCreate("my-type").getDocumentMapper(); + + ParsedDocument parsedDoc = documentMapper.parse( + SourceToParse.source("my-index", "my-type", "1", XContentFactory.jsonBuilder() + .startObject() + .field("foo", 3) + .endObject() + .bytes(), XContentType.JSON)); + + String expectedMapping = XContentFactory.jsonBuilder().startObject() + .startObject("my-type") + .startObject("properties") + .startObject("foo").field("type", "long") + .endObject().endObject().endObject().endObject().string(); + assertEquals(expectedMapping, parsedDoc.dynamicMappingsUpdate().toString()); + } + + public void testDynamicMappingSettingRemoval() { + Settings settings = Settings.builder() + .put(MapperService.INDEX_MAPPER_DYNAMIC_SETTING.getKey(), false) + .build(); + Exception e = expectThrows(IllegalArgumentException.class, () -> createIndex("test-index", settings)); + assertEquals(e.getMessage(), "Setting index.mapper.dynamic was removed after version 6.0.0"); + } + + public void testDynamicMappingDisablePreEs6() { + Settings settingsPreEs6 
= Settings.builder() + .put(MapperService.INDEX_MAPPER_DYNAMIC_SETTING.getKey(), false) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0) + .build(); + MapperService preEs6MapperService = createIndex("pre-es6-index", settingsPreEs6).mapperService(); + Exception e = expectThrows(TypeMissingException.class, + () -> preEs6MapperService.documentMapperWithAutoCreate("pre-es6-type")); + assertEquals(e.getMessage(), "type[pre-es6-type] missing"); + } +} diff --git a/core/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java index dde37962af586..9d25e2b70b89c 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java @@ -38,6 +38,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; import java.util.function.Supplier; @@ -56,7 +57,7 @@ private static SortedSet set(T... 
values) { return new TreeSet<>(Arrays.asList(values)); } - void assertFieldNames(SortedSet expected, ParsedDocument doc) { + void assertFieldNames(Set expected, ParsedDocument doc) { String[] got = doc.rootDoc().getValues("_field_names"); assertEquals(expected, set(got)); } @@ -120,6 +121,26 @@ public void testExplicitEnabled() throws Exception { assertFieldNames(set("field", "field.keyword", "_id", "_version", "_seq_no", "_primary_term", "_source"), doc); } + public void testDedup() throws Exception { + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("_field_names").field("enabled", true).endObject() + .endObject().endObject().string(); + DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + FieldNamesFieldMapper fieldNamesMapper = docMapper.metadataMapper(FieldNamesFieldMapper.class); + assertTrue(fieldNamesMapper.fieldType().isEnabled()); + + ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() + .startObject() + .field("field", 3) // will create 2 lucene fields under the hood: index and doc values + .endObject() + .bytes(), + XContentType.JSON)); + + Set fields = set("field", "_id", "_version", "_seq_no", "_primary_term", "_source"); + assertFieldNames(fields, doc); + assertEquals(fields.size(), doc.rootDoc().getValues("_field_names").length); + } + public void testDisabled() throws Exception { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("_field_names").field("enabled", false).endObject() diff --git a/core/src/test/java/org/elasticsearch/index/mapper/IdFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/IdFieldTypeTests.java index ffd83475ab887..5be1923cbed3c 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/IdFieldTypeTests.java +++ 
b/core/src/test/java/org/elasticsearch/index/mapper/IdFieldTypeTests.java @@ -42,7 +42,7 @@ public void testRangeQuery() { MappedFieldType ft = createDefaultFieldType(); ft.setName("_id"); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> ft.rangeQuery(null, null, randomBoolean(), randomBoolean(), null)); + () -> ft.rangeQuery(null, null, randomBoolean(), randomBoolean(), null, null, null, null)); assertEquals("Field [_id] of type [_id] does not support range queries", e.getMessage()); } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/IpFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/IpFieldTypeTests.java index 5c65aa5a09de7..1c0024b769ff4 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/IpFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/IpFieldTypeTests.java @@ -106,83 +106,84 @@ public void testRangeQuery() { InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddressPoint.MAX_VALUE), - ft.rangeQuery(null, null, randomBoolean(), randomBoolean(), null)); + ft.rangeQuery(null, null, randomBoolean(), randomBoolean(), null, null, null, null)); assertEquals( InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddresses.forString("192.168.2.0")), - ft.rangeQuery(null, "192.168.2.0", randomBoolean(), true, null)); + ft.rangeQuery(null, "192.168.2.0", randomBoolean(), true, null, null, null, null)); assertEquals( InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddresses.forString("192.168.1.255")), - ft.rangeQuery(null, "192.168.2.0", randomBoolean(), false, null)); + ft.rangeQuery(null, "192.168.2.0", randomBoolean(), false, null, null, null, null)); assertEquals( InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::"), InetAddressPoint.MAX_VALUE), - ft.rangeQuery("2001:db8::", null, true, randomBoolean(), null)); + ft.rangeQuery("2001:db8::", null, true, 
randomBoolean(), null, null, null, null)); assertEquals( InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::1"), InetAddressPoint.MAX_VALUE), - ft.rangeQuery("2001:db8::", null, false, randomBoolean(), null)); + ft.rangeQuery("2001:db8::", null, false, randomBoolean(), null, null, null, null)); assertEquals( InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::"), InetAddresses.forString("2001:db8::ffff")), - ft.rangeQuery("2001:db8::", "2001:db8::ffff", true, true, null)); + ft.rangeQuery("2001:db8::", "2001:db8::ffff", true, true, null, null, null, null)); assertEquals( InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::1"), InetAddresses.forString("2001:db8::fffe")), - ft.rangeQuery("2001:db8::", "2001:db8::ffff", false, false, null)); + ft.rangeQuery("2001:db8::", "2001:db8::ffff", false, false, null, null, null, null)); assertEquals( InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::2"), InetAddresses.forString("2001:db8::")), // same lo/hi values but inclusive=false so this won't match anything - ft.rangeQuery("2001:db8::1", "2001:db8::1", false, false, null)); + ft.rangeQuery("2001:db8::1", "2001:db8::1", false, false, null, null, null, null)); // Upper bound is the min IP and is not inclusive assertEquals(new MatchNoDocsQuery(), - ft.rangeQuery("::", "::", true, false, null)); + ft.rangeQuery("::", "::", true, false, null, null, null, null)); // Lower bound is the max IP and is not inclusive assertEquals(new MatchNoDocsQuery(), - ft.rangeQuery("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", false, true, null)); + ft.rangeQuery("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", + false, true, null, null, null, null)); assertEquals( InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddresses.forString("::fffe:ffff:ffff")), // same lo/hi values but inclusive=false so 
this won't match anything - ft.rangeQuery("::", "0.0.0.0", true, false, null)); + ft.rangeQuery("::", "0.0.0.0", true, false, null, null, null, null)); assertEquals( InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::1:0:0:0"), InetAddressPoint.MAX_VALUE), // same lo/hi values but inclusive=false so this won't match anything - ft.rangeQuery("255.255.255.255", "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", false, true, null)); + ft.rangeQuery("255.255.255.255", "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", false, true, null, null, null, null)); assertEquals( // lower bound is ipv4, upper bound is ipv6 InetAddressPoint.newRangeQuery("field", InetAddresses.forString("192.168.1.7"), InetAddresses.forString("2001:db8::")), - ft.rangeQuery("::ffff:c0a8:107", "2001:db8::", true, true, null)); + ft.rangeQuery("::ffff:c0a8:107", "2001:db8::", true, true, null, null, null, null)); ft.setIndexOptions(IndexOptions.NONE); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> ft.rangeQuery("::1", "2001::", true, true, null)); + () -> ft.rangeQuery("::1", "2001::", true, true, null, null, null, null)); assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/NumberFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/NumberFieldTypeTests.java index 58ece7b507388..0066c5a7798b6 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/NumberFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/NumberFieldTypeTests.java @@ -139,76 +139,116 @@ public void testRangeQueryWithNegativeBounds() { MappedFieldType ftInt = new NumberFieldMapper.NumberFieldType(NumberType.INTEGER); ftInt.setName("field"); ftInt.setIndexOptions(IndexOptions.DOCS); - assertEquals(ftInt.rangeQuery(-3, -3, true, true, null), ftInt.rangeQuery(-3.5, -2.5, true, true, null)); - assertEquals(ftInt.rangeQuery(-3, -3, true, true, null), 
ftInt.rangeQuery(-3.5, -2.5, false, false, null)); - assertEquals(ftInt.rangeQuery(0, 0, true, true, null), ftInt.rangeQuery(-0.5, 0.5, true, true, null)); - assertEquals(ftInt.rangeQuery(0, 0, true, true, null), ftInt.rangeQuery(-0.5, 0.5, false, false, null)); - assertEquals(ftInt.rangeQuery(1, 2, true, true, null), ftInt.rangeQuery(0.5, 2.5, true, true, null)); - assertEquals(ftInt.rangeQuery(1, 2, true, true, null), ftInt.rangeQuery(0.5, 2.5, false, false, null)); - assertEquals(ftInt.rangeQuery(0, 2, true, true, null), ftInt.rangeQuery(-0.5, 2.5, true, true, null)); - assertEquals(ftInt.rangeQuery(0, 2, true, true, null), ftInt.rangeQuery(-0.5, 2.5, false, false, null)); - - assertEquals(ftInt.rangeQuery(-2, 0, true, true, null), ftInt.rangeQuery(-2.5, 0.5, true, true, null)); - assertEquals(ftInt.rangeQuery(-2, 0, true, true, null), ftInt.rangeQuery(-2.5, 0.5, false, false, null)); - assertEquals(ftInt.rangeQuery(-2, -1, true, true, null), ftInt.rangeQuery(-2.5, -0.5, true, true, null)); - assertEquals(ftInt.rangeQuery(-2, -1, true, true, null), ftInt.rangeQuery(-2.5, -0.5, false, false, null)); + assertEquals(ftInt.rangeQuery(-3, -3, true, true, null, null, null, null), + ftInt.rangeQuery(-3.5, -2.5, true, true, null, null, null, null)); + assertEquals(ftInt.rangeQuery(-3, -3, true, true, null, null, null, null), + ftInt.rangeQuery(-3.5, -2.5, false, false, null, null, null, null)); + assertEquals(ftInt.rangeQuery(0, 0, true, true, null, null, null, null), + ftInt.rangeQuery(-0.5, 0.5, true, true, null, null, null, null)); + assertEquals(ftInt.rangeQuery(0, 0, true, true, null, null, null, null), + ftInt.rangeQuery(-0.5, 0.5, false, false, null, null, null, null)); + assertEquals(ftInt.rangeQuery(1, 2, true, true, null, null, null, null), + ftInt.rangeQuery(0.5, 2.5, true, true, null, null, null, null)); + assertEquals(ftInt.rangeQuery(1, 2, true, true, null, null, null, null), + ftInt.rangeQuery(0.5, 2.5, false, false, null, null, null, null)); + 
assertEquals(ftInt.rangeQuery(0, 2, true, true, null, null, null, null), + ftInt.rangeQuery(-0.5, 2.5, true, true, null, null, null, null)); + assertEquals(ftInt.rangeQuery(0, 2, true, true, null, null, null, null), + ftInt.rangeQuery(-0.5, 2.5, false, false, null, null, null, null)); + + assertEquals(ftInt.rangeQuery(-2, 0, true, true, null, null, null, null), + ftInt.rangeQuery(-2.5, 0.5, true, true, null, null, null, null)); + assertEquals(ftInt.rangeQuery(-2, 0, true, true, null, null, null, null), + ftInt.rangeQuery(-2.5, 0.5, false, false, null, null, null, null)); + assertEquals(ftInt.rangeQuery(-2, -1, true, true, null, null, null, null), + ftInt.rangeQuery(-2.5, -0.5, true, true, null, null, null, null)); + assertEquals(ftInt.rangeQuery(-2, -1, true, true, null, null, null, null), + ftInt.rangeQuery(-2.5, -0.5, false, false, null, null, null, null)); MappedFieldType ftLong = new NumberFieldMapper.NumberFieldType(NumberType.LONG); ftLong.setName("field"); ftLong.setIndexOptions(IndexOptions.DOCS); - assertEquals(ftLong.rangeQuery(-3, -3, true, true, null), ftLong.rangeQuery(-3.5, -2.5, true, true, null)); - assertEquals(ftLong.rangeQuery(-3, -3, true, true, null), ftLong.rangeQuery(-3.5, -2.5, false, false, null)); - assertEquals(ftLong.rangeQuery(0, 0, true, true, null), ftLong.rangeQuery(-0.5, 0.5, true, true, null)); - assertEquals(ftLong.rangeQuery(0, 0, true, true, null), ftLong.rangeQuery(-0.5, 0.5, false, false, null)); - assertEquals(ftLong.rangeQuery(1, 2, true, true, null), ftLong.rangeQuery(0.5, 2.5, true, true, null)); - assertEquals(ftLong.rangeQuery(1, 2, true, true, null), ftLong.rangeQuery(0.5, 2.5, false, false, null)); - assertEquals(ftLong.rangeQuery(0, 2, true, true, null), ftLong.rangeQuery(-0.5, 2.5, true, true, null)); - assertEquals(ftLong.rangeQuery(0, 2, true, true, null), ftLong.rangeQuery(-0.5, 2.5, false, false, null)); - - assertEquals(ftLong.rangeQuery(-2, 0, true, true, null), ftLong.rangeQuery(-2.5, 0.5, true, true, null)); 
- assertEquals(ftLong.rangeQuery(-2, 0, true, true, null), ftLong.rangeQuery(-2.5, 0.5, false, false, null)); - assertEquals(ftLong.rangeQuery(-2, -1, true, true, null), ftLong.rangeQuery(-2.5, -0.5, true, true, null)); - assertEquals(ftLong.rangeQuery(-2, -1, true, true, null), ftLong.rangeQuery(-2.5, -0.5, false, false, null)); + assertEquals(ftLong.rangeQuery(-3, -3, true, true, null, null, null, null), + ftLong.rangeQuery(-3.5, -2.5, true, true, null, null, null, null)); + assertEquals(ftLong.rangeQuery(-3, -3, true, true, null, null, null, null), + ftLong.rangeQuery(-3.5, -2.5, false, false, null, null, null, null)); + assertEquals(ftLong.rangeQuery(0, 0, true, true, null, null, null, null), + ftLong.rangeQuery(-0.5, 0.5, true, true, null, null, null, null)); + assertEquals(ftLong.rangeQuery(0, 0, true, true, null, null, null, null), + ftLong.rangeQuery(-0.5, 0.5, false, false, null, null, null, null)); + assertEquals(ftLong.rangeQuery(1, 2, true, true, null, null, null, null), + ftLong.rangeQuery(0.5, 2.5, true, true, null, null, null, null)); + assertEquals(ftLong.rangeQuery(1, 2, true, true, null, null, null, null), + ftLong.rangeQuery(0.5, 2.5, false, false, null, null, null, null)); + assertEquals(ftLong.rangeQuery(0, 2, true, true, null, null, null, null), + ftLong.rangeQuery(-0.5, 2.5, true, true, null, null, null, null)); + assertEquals(ftLong.rangeQuery(0, 2, true, true, null, null, null, null), + ftLong.rangeQuery(-0.5, 2.5, false, false, null, null, null, null)); + + assertEquals(ftLong.rangeQuery(-2, 0, true, true, null, null, null, null), + ftLong.rangeQuery(-2.5, 0.5, true, true, null, null, null, null)); + assertEquals(ftLong.rangeQuery(-2, 0, true, true, null, null, null, null), + ftLong.rangeQuery(-2.5, 0.5, false, false, null, null, null, null)); + assertEquals(ftLong.rangeQuery(-2, -1, true, true, null, null, null, null), + ftLong.rangeQuery(-2.5, -0.5, true, true, null, null, null, null)); + assertEquals(ftLong.rangeQuery(-2, -1, true, 
true, null, null, null, null), + ftLong.rangeQuery(-2.5, -0.5, false, false, null, null, null, null)); } public void testByteRangeQueryWithDecimalParts() { MappedFieldType ft = new NumberFieldMapper.NumberFieldType(NumberType.BYTE); ft.setName("field"); ft.setIndexOptions(IndexOptions.DOCS); - assertEquals(ft.rangeQuery(2, 10, true, true, null), ft.rangeQuery(1.1, 10, true, true, null)); - assertEquals(ft.rangeQuery(2, 10, true, true, null), ft.rangeQuery(1.1, 10, false, true, null)); - assertEquals(ft.rangeQuery(1, 10, true, true, null), ft.rangeQuery(1, 10.1, true, true, null)); - assertEquals(ft.rangeQuery(1, 10, true, true, null), ft.rangeQuery(1, 10.1, true, false, null)); + assertEquals(ft.rangeQuery(2, 10, true, true, null, null, null, null), + ft.rangeQuery(1.1, 10, true, true, null, null, null, null)); + assertEquals(ft.rangeQuery(2, 10, true, true, null, null, null, null), + ft.rangeQuery(1.1, 10, false, true, null, null, null, null)); + assertEquals(ft.rangeQuery(1, 10, true, true, null, null, null, null), + ft.rangeQuery(1, 10.1, true, true, null, null, null, null)); + assertEquals(ft.rangeQuery(1, 10, true, true, null, null, null, null), + ft.rangeQuery(1, 10.1, true, false, null, null, null, null)); } public void testShortRangeQueryWithDecimalParts() { MappedFieldType ft = new NumberFieldMapper.NumberFieldType(NumberType.SHORT); ft.setName("field"); ft.setIndexOptions(IndexOptions.DOCS); - assertEquals(ft.rangeQuery(2, 10, true, true, null), ft.rangeQuery(1.1, 10, true, true, null)); - assertEquals(ft.rangeQuery(2, 10, true, true, null), ft.rangeQuery(1.1, 10, false, true, null)); - assertEquals(ft.rangeQuery(1, 10, true, true, null), ft.rangeQuery(1, 10.1, true, true, null)); - assertEquals(ft.rangeQuery(1, 10, true, true, null), ft.rangeQuery(1, 10.1, true, false, null)); + assertEquals(ft.rangeQuery(2, 10, true, true, null, null, null, null), + ft.rangeQuery(1.1, 10, true, true, null, null, null, null)); + assertEquals(ft.rangeQuery(2, 10, true, 
true, null, null, null, null), + ft.rangeQuery(1.1, 10, false, true, null, null, null, null)); + assertEquals(ft.rangeQuery(1, 10, true, true, null, null, null, null), + ft.rangeQuery(1, 10.1, true, true, null, null, null, null)); + assertEquals(ft.rangeQuery(1, 10, true, true, null, null, null, null), + ft.rangeQuery(1, 10.1, true, false, null, null, null, null)); } public void testIntegerRangeQueryWithDecimalParts() { MappedFieldType ft = new NumberFieldMapper.NumberFieldType(NumberType.INTEGER); ft.setName("field"); ft.setIndexOptions(IndexOptions.DOCS); - assertEquals(ft.rangeQuery(2, 10, true, true, null), ft.rangeQuery(1.1, 10, true, true, null)); - assertEquals(ft.rangeQuery(2, 10, true, true, null), ft.rangeQuery(1.1, 10, false, true, null)); - assertEquals(ft.rangeQuery(1, 10, true, true, null), ft.rangeQuery(1, 10.1, true, true, null)); - assertEquals(ft.rangeQuery(1, 10, true, true, null), ft.rangeQuery(1, 10.1, true, false, null)); + assertEquals(ft.rangeQuery(2, 10, true, true, null, null, null, null), + ft.rangeQuery(1.1, 10, true, true, null, null, null, null)); + assertEquals(ft.rangeQuery(2, 10, true, true, null, null, null, null), + ft.rangeQuery(1.1, 10, false, true, null, null, null, null)); + assertEquals(ft.rangeQuery(1, 10, true, true, null, null, null, null), + ft.rangeQuery(1, 10.1, true, true, null, null, null, null)); + assertEquals(ft.rangeQuery(1, 10, true, true, null, null, null, null), + ft.rangeQuery(1, 10.1, true, false, null, null, null, null)); } public void testLongRangeQueryWithDecimalParts() { MappedFieldType ft = new NumberFieldMapper.NumberFieldType(NumberType.LONG); ft.setName("field"); ft.setIndexOptions(IndexOptions.DOCS); - assertEquals(ft.rangeQuery(2, 10, true, true, null), ft.rangeQuery(1.1, 10, true, true, null)); - assertEquals(ft.rangeQuery(2, 10, true, true, null), ft.rangeQuery(1.1, 10, false, true, null)); - assertEquals(ft.rangeQuery(1, 10, true, true, null), ft.rangeQuery(1, 10.1, true, true, null)); - 
assertEquals(ft.rangeQuery(1, 10, true, true, null), ft.rangeQuery(1, 10.1, true, false, null)); + assertEquals(ft.rangeQuery(2, 10, true, true, null, null, null, null), + ft.rangeQuery(1.1, 10, true, true, null, null, null, null)); + assertEquals(ft.rangeQuery(2, 10, true, true, null, null, null, null), + ft.rangeQuery(1.1, 10, false, true, null, null, null, null)); + assertEquals(ft.rangeQuery(1, 10, true, true, null, null, null, null), + ft.rangeQuery(1, 10.1, true, true, null, null, null, null)); + assertEquals(ft.rangeQuery(1, 10, true, true, null, null, null, null), + ft.rangeQuery(1, 10.1, true, false, null, null, null, null)); } public void testRangeQuery() { @@ -218,11 +258,11 @@ public void testRangeQuery() { Query expected = new IndexOrDocValuesQuery( LongPoint.newRangeQuery("field", 1, 3), SortedNumericDocValuesField.newSlowRangeQuery("field", 1, 3)); - assertEquals(expected, ft.rangeQuery("1", "3", true, true, null)); + assertEquals(expected, ft.rangeQuery("1", "3", true, true, null, null, null, null)); ft.setIndexOptions(IndexOptions.NONE); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> ft.rangeQuery("1", "3", true, true, null)); + () -> ft.rangeQuery("1", "3", true, true, null, null, null, null)); assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/UidFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/UidFieldTypeTests.java index 2fcd0f8fd4071..9b2e0ceb0721f 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/UidFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/UidFieldTypeTests.java @@ -45,7 +45,7 @@ public void testRangeQuery() { MappedFieldType ft = createDefaultFieldType(); ft.setName("_uid"); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> ft.rangeQuery(null, null, randomBoolean(), randomBoolean(), null)); + () -> 
ft.rangeQuery(null, null, randomBoolean(), randomBoolean(), null, null, null, null)); assertEquals("Field [_uid] of type [_uid] does not support range queries", e.getMessage()); } diff --git a/core/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java index be845bc1f2185..e440fc0277229 100644 --- a/core/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java @@ -149,35 +149,4 @@ public void testFromJson() throws IOException { assertThat(parsed.ids(), contains("1","100","4")); assertEquals(json, 0, parsed.types().length); } - - public void testFromJsonDeprecatedSyntax() throws IOException { - IdsQueryBuilder testQuery = new IdsQueryBuilder().types("my_type"); - - //single value type can also be called _type - final String contentString = "{\n" + - " \"ids\" : {\n" + - " \"_type\" : \"my_type\",\n" + - " \"values\" : [ ]\n" + - " }\n" + - "}"; - - IdsQueryBuilder parsed = (IdsQueryBuilder) parseQuery(contentString); - assertEquals(testQuery, parsed); - - parseQuery(contentString); - assertWarnings("Deprecated field [_type] used, expected [type] instead"); - - //array of types can also be called types rather than type - final String contentString2 = "{\n" + - " \"ids\" : {\n" + - " \"types\" : [\"my_type\"],\n" + - " \"values\" : [ ]\n" + - " }\n" + - "}"; - parsed = (IdsQueryBuilder) parseQuery(contentString2); - assertEquals(testQuery, parsed); - - parseQuery(contentString2); - assertWarnings("Deprecated field [types] used, expected [type] instead"); - } } diff --git a/core/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java index 78975bf7b1784..a4e6856166272 100644 --- a/core/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java +++ 
b/core/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java @@ -139,8 +139,8 @@ public void testEqualsAndHashcode() { public static InnerHitBuilder randomInnerHits() { InnerHitBuilder innerHits = new InnerHitBuilder(); innerHits.setName(randomAlphaOfLengthBetween(1, 16)); - innerHits.setFrom(randomIntBetween(0, 128)); - innerHits.setSize(randomIntBetween(0, 128)); + innerHits.setFrom(randomIntBetween(0, 32)); + innerHits.setSize(randomIntBetween(0, 32)); innerHits.setExplain(randomBoolean()); innerHits.setVersion(randomBoolean()); innerHits.setTrackScores(randomBoolean()); diff --git a/core/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java index 0e3928c89883c..9d674a1a0d05a 100644 --- a/core/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java @@ -26,6 +26,8 @@ import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder; import org.elasticsearch.index.search.ESToParentBlockJoinQuery; @@ -41,6 +43,7 @@ import java.util.HashMap; import java.util.Map; +import static org.elasticsearch.index.IndexSettingsTests.newIndexMeta; import static org.elasticsearch.index.query.InnerHitBuilderTests.randomInnerHits; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; @@ -325,6 +328,11 @@ public void testBuildIgnoreUnmappedNestQuery() throws Exception { SearchContext searchContext = mock(SearchContext.class); 
when(searchContext.getQueryShardContext()).thenReturn(queryShardContext); + MapperService mapperService = mock(MapperService.class); + IndexSettings settings = new IndexSettings(newIndexMeta("index", Settings.EMPTY), Settings.EMPTY); + when(mapperService.getIndexSettings()).thenReturn(settings); + when(searchContext.mapperService()).thenReturn(mapperService); + InnerHitBuilder leafInnerHits = randomInnerHits(); NestedQueryBuilder query1 = new NestedQueryBuilder("path", new MatchAllQueryBuilder(), ScoreMode.None); query1.innerHit(leafInnerHits); diff --git a/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java index d2183dfcc20d2..9609c8415a789 100644 --- a/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java @@ -45,10 +45,8 @@ import org.apache.lucene.search.spans.SpanTermQuery; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.automaton.TooComplexToDeterminizeException; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery; import org.elasticsearch.common.settings.Settings; @@ -1009,6 +1007,28 @@ public void testDefaultField() throws Exception { ); } + /** + * the quote analyzer should overwrite any other forced analyzer in quoted parts of the query + */ + public void testQuoteAnalyzer() throws Exception { + assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); + // Prefix + Query query = new QueryStringQueryBuilder("ONE \"TWO THREE\"") + .field(STRING_FIELD_NAME) + 
.analyzer("whitespace") + .quoteAnalyzer("simple") + .toQuery(createShardContext()); + Query expectedQuery = + new BooleanQuery.Builder() + .add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "ONE")), Occur.SHOULD)) + .add(new BooleanClause(new PhraseQuery.Builder() + .add(new Term(STRING_FIELD_NAME, "two"), 0) + .add(new Term(STRING_FIELD_NAME, "three"), 1) + .build(), Occur.SHOULD)) + .build(); + assertEquals(expectedQuery, query); + } + private static IndexMetaData newIndexMeta(String name, Settings oldIndexSettings, Settings indexSettings) { Settings build = Settings.builder().put(oldIndexSettings) .put(indexSettings) diff --git a/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java index b57b45c3d7484..b101a68185590 100644 --- a/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java @@ -234,16 +234,6 @@ public void testIllegalArguments() { expectThrows(IllegalArgumentException.class, () -> rangeQueryBuilder.format("badFormat")); } - /** - * Specifying a timezone together with a numeric range query should throw an exception. - */ - public void testToQueryNonDateWithTimezone() throws QueryShardException { - RangeQueryBuilder query = new RangeQueryBuilder(INT_FIELD_NAME); - query.from(1).to(10).timeZone("UTC"); - QueryShardException e = expectThrows(QueryShardException.class, () -> query.toQuery(createShardContext())); - assertThat(e.getMessage(), containsString("[range] time_zone can not be applied")); - } - /** * Specifying a timezone together with an unmapped field should throw an exception. 
*/ @@ -364,7 +354,7 @@ public void testDateRangeQueryTimezone() throws IOException { " }\n" + "}"; QueryBuilder queryBuilder = parseQuery(query); - expectThrows(QueryShardException.class, () -> queryBuilder.toQuery(createShardContext())); + queryBuilder.toQuery(createShardContext()); // no exception } public void testFromJson() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java b/core/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java index a16e09e110b82..1447ea8ae50a9 100644 --- a/core/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java +++ b/core/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java @@ -37,7 +37,7 @@ import org.elasticsearch.index.engine.SegmentsStats; import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.seqno.SeqNoStats; -import org.elasticsearch.index.seqno.SequenceNumbersService; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardTests; import org.elasticsearch.index.store.Store; @@ -165,9 +165,9 @@ public void testCheckpointsAdvance() throws Exception { */ final Matcher globalCheckpointMatcher; if (shardRouting.primary()) { - globalCheckpointMatcher = numDocs == 0 ? equalTo(SequenceNumbersService.NO_OPS_PERFORMED) : equalTo(numDocs - 1L); + globalCheckpointMatcher = numDocs == 0 ? equalTo(SequenceNumbers.NO_OPS_PERFORMED) : equalTo(numDocs - 1L); } else { - globalCheckpointMatcher = numDocs == 0 ? equalTo(SequenceNumbersService.NO_OPS_PERFORMED) + globalCheckpointMatcher = numDocs == 0 ? 
equalTo(SequenceNumbers.NO_OPS_PERFORMED) : anyOf(equalTo(numDocs - 1L), equalTo(numDocs - 2L)); } assertThat(shardRouting + " global checkpoint mismatch", shardStats.getGlobalCheckpoint(), globalCheckpointMatcher); @@ -177,7 +177,7 @@ public void testCheckpointsAdvance() throws Exception { // simulate a background global checkpoint sync at which point we expect the global checkpoint to advance on the replicas shards.syncGlobalCheckpoint(); - final long noOpsPerformed = SequenceNumbersService.NO_OPS_PERFORMED; + final long noOpsPerformed = SequenceNumbers.NO_OPS_PERFORMED; for (IndexShard shard : shards) { final SeqNoStats shardStats = shard.seqNoStats(); final ShardRouting shardRouting = shard.routingEntry(); diff --git a/core/src/test/java/org/elasticsearch/index/search/NestedHelperTests.java b/core/src/test/java/org/elasticsearch/index/search/NestedHelperTests.java index 1cb8451b0aac4..e781a3311b383 100644 --- a/core/src/test/java/org/elasticsearch/index/search/NestedHelperTests.java +++ b/core/src/test/java/org/elasticsearch/index/search/NestedHelperTests.java @@ -146,28 +146,28 @@ public void testTermQuery() { } public void testRangeQuery() { - Query rangeQuery = mapperService.fullName("foo2").rangeQuery(2, 5, true, true, null); + Query rangeQuery = mapperService.fullName("foo2").rangeQuery(2, 5, true, true, null, null, null, null); assertFalse(new NestedHelper(mapperService).mightMatchNestedDocs(rangeQuery)); assertTrue(new NestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested1")); assertTrue(new NestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested2")); assertTrue(new NestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested3")); assertTrue(new NestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested_missing")); - rangeQuery = mapperService.fullName("nested1.foo2").rangeQuery(2, 5, true, true, null); + rangeQuery = mapperService.fullName("nested1.foo2").rangeQuery(2, 5, true, true, null, null, 
null, null); assertTrue(new NestedHelper(mapperService).mightMatchNestedDocs(rangeQuery)); assertFalse(new NestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested1")); assertTrue(new NestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested2")); assertTrue(new NestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested3")); assertTrue(new NestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested_missing")); - rangeQuery = mapperService.fullName("nested2.foo2").rangeQuery(2, 5, true, true, null); + rangeQuery = mapperService.fullName("nested2.foo2").rangeQuery(2, 5, true, true, null, null, null, null); assertTrue(new NestedHelper(mapperService).mightMatchNestedDocs(rangeQuery)); assertTrue(new NestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested1")); assertTrue(new NestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested2")); assertTrue(new NestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested3")); assertTrue(new NestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested_missing")); - rangeQuery = mapperService.fullName("nested3.foo2").rangeQuery(2, 5, true, true, null); + rangeQuery = mapperService.fullName("nested3.foo2").rangeQuery(2, 5, true, true, null, null, null, null); assertTrue(new NestedHelper(mapperService).mightMatchNestedDocs(rangeQuery)); assertTrue(new NestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested1")); assertTrue(new NestedHelper(mapperService).mightMatchNonNestedDocs(rangeQuery, "nested2")); diff --git a/core/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointTrackerTests.java b/core/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointTrackerTests.java index 2f7d2dd15ceb5..8d53c69e2713e 100644 --- a/core/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointTrackerTests.java +++ b/core/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointTrackerTests.java @@ -53,8 +53,8 @@ 
import java.util.stream.Stream; import static java.util.Collections.emptySet; -import static org.elasticsearch.index.seqno.SequenceNumbersService.NO_OPS_PERFORMED; -import static org.elasticsearch.index.seqno.SequenceNumbersService.UNASSIGNED_SEQ_NO; +import static org.elasticsearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; +import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.not; @@ -393,14 +393,14 @@ public void testUpdateAllocationIdsFromMaster() throws Exception { .stream() .filter(a -> a.equals(primaryId) == false) .allMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).getLocalCheckpoint() - == SequenceNumbersService.UNASSIGNED_SEQ_NO)); + == SequenceNumbers.UNASSIGNED_SEQ_NO)); assertTrue(initializingIds.stream().noneMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).inSync)); assertTrue( initializingIds .stream() .filter(a -> a.equals(primaryId) == false) .allMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).getLocalCheckpoint() - == SequenceNumbersService.UNASSIGNED_SEQ_NO)); + == SequenceNumbers.UNASSIGNED_SEQ_NO)); // now we will remove some allocation IDs from these and ensure that they propagate through final Set removingActiveAllocationIds = new HashSet<>(randomSubsetOf(activeAllocationIds)); @@ -433,13 +433,13 @@ public void testUpdateAllocationIdsFromMaster() throws Exception { .stream() .filter(a -> a.equals(primaryId) == false) .allMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).getLocalCheckpoint() - == SequenceNumbersService.UNASSIGNED_SEQ_NO)); + == SequenceNumbers.UNASSIGNED_SEQ_NO)); assertTrue(newInitializingAllocationIds.stream().noneMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).inSync)); assertTrue( newInitializingAllocationIds .stream() .allMatch(a -> 
tracker.getTrackedLocalCheckpointForShard(a.getId()).getLocalCheckpoint() - == SequenceNumbersService.UNASSIGNED_SEQ_NO)); + == SequenceNumbers.UNASSIGNED_SEQ_NO)); // the tracking allocation IDs should play no role in determining the global checkpoint final Map activeLocalCheckpoints = @@ -737,7 +737,7 @@ private static void activatePrimary(FakeClusterState clusterState, GlobalCheckpo private static void randomLocalCheckpointUpdate(GlobalCheckpointTracker gcp) { String allocationId = randomFrom(gcp.localCheckpoints.keySet()); long currentLocalCheckpoint = gcp.localCheckpoints.get(allocationId).getLocalCheckpoint(); - gcp.updateLocalCheckpoint(allocationId, Math.max(SequenceNumbersService.NO_OPS_PERFORMED, currentLocalCheckpoint + randomInt(5))); + gcp.updateLocalCheckpoint(allocationId, Math.max(SequenceNumbers.NO_OPS_PERFORMED, currentLocalCheckpoint + randomInt(5))); } private static void randomMarkInSync(GlobalCheckpointTracker gcp) { diff --git a/core/src/test/java/org/elasticsearch/index/seqno/LocalCheckpointTrackerTests.java b/core/src/test/java/org/elasticsearch/index/seqno/LocalCheckpointTrackerTests.java index e2978ffc51d52..ab513c787c3cb 100644 --- a/core/src/test/java/org/elasticsearch/index/seqno/LocalCheckpointTrackerTests.java +++ b/core/src/test/java/org/elasticsearch/index/seqno/LocalCheckpointTrackerTests.java @@ -56,8 +56,8 @@ public static LocalCheckpointTracker createEmptyTracker() { .builder() .put(LocalCheckpointTracker.SETTINGS_BIT_ARRAYS_SIZE.getKey(), SMALL_CHUNK_SIZE) .build()), - SequenceNumbersService.NO_OPS_PERFORMED, - SequenceNumbersService.NO_OPS_PERFORMED + SequenceNumbers.NO_OPS_PERFORMED, + SequenceNumbers.NO_OPS_PERFORMED ); } @@ -70,7 +70,7 @@ public void setUp() throws Exception { public void testSimplePrimary() { long seqNo1, seqNo2; - assertThat(tracker.getCheckpoint(), equalTo(SequenceNumbersService.NO_OPS_PERFORMED)); + assertThat(tracker.getCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); seqNo1 = 
tracker.generateSeqNo(); assertThat(seqNo1, equalTo(0L)); tracker.markSeqNoAsCompleted(seqNo1); @@ -86,7 +86,7 @@ public void testSimplePrimary() { } public void testSimpleReplica() { - assertThat(tracker.getCheckpoint(), equalTo(SequenceNumbersService.NO_OPS_PERFORMED)); + assertThat(tracker.getCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); tracker.markSeqNoAsCompleted(0L); assertThat(tracker.getCheckpoint(), equalTo(0L)); tracker.markSeqNoAsCompleted(2L); @@ -240,7 +240,7 @@ public void testWaitForOpsToComplete() throws BrokenBarrierException, Interrupte public void testResetCheckpoint() { final int operations = 1024 - scaledRandomIntBetween(0, 1024); - int maxSeqNo = Math.toIntExact(SequenceNumbersService.NO_OPS_PERFORMED); + int maxSeqNo = Math.toIntExact(SequenceNumbers.NO_OPS_PERFORMED); for (int i = 0; i < operations; i++) { if (!rarely()) { tracker.markSeqNoAsCompleted(i); @@ -249,7 +249,7 @@ public void testResetCheckpoint() { } final int localCheckpoint = - randomIntBetween(Math.toIntExact(SequenceNumbersService.NO_OPS_PERFORMED), Math.toIntExact(tracker.getCheckpoint())); + randomIntBetween(Math.toIntExact(SequenceNumbers.NO_OPS_PERFORMED), Math.toIntExact(tracker.getCheckpoint())); tracker.resetCheckpoint(localCheckpoint); assertThat(tracker.getCheckpoint(), equalTo((long) localCheckpoint)); assertThat(tracker.getMaxSeqNo(), equalTo((long) maxSeqNo)); diff --git a/core/src/test/java/org/elasticsearch/index/seqno/SequenceNumbersTests.java b/core/src/test/java/org/elasticsearch/index/seqno/SequenceNumbersTests.java index 23eac18377017..f835cff3f4656 100644 --- a/core/src/test/java/org/elasticsearch/index/seqno/SequenceNumbersTests.java +++ b/core/src/test/java/org/elasticsearch/index/seqno/SequenceNumbersTests.java @@ -29,31 +29,31 @@ public class SequenceNumbersTests extends ESTestCase { public void testMin() { final long seqNo = randomNonNegativeLong(); - assertThat(SequenceNumbers.min(SequenceNumbersService.NO_OPS_PERFORMED, seqNo), 
equalTo(seqNo)); + assertThat(SequenceNumbers.min(SequenceNumbers.NO_OPS_PERFORMED, seqNo), equalTo(seqNo)); assertThat( - SequenceNumbers.min(SequenceNumbersService.NO_OPS_PERFORMED, SequenceNumbersService.UNASSIGNED_SEQ_NO), - equalTo(SequenceNumbersService.UNASSIGNED_SEQ_NO)); - assertThat(SequenceNumbers.min(SequenceNumbersService.UNASSIGNED_SEQ_NO, seqNo), equalTo(seqNo)); + SequenceNumbers.min(SequenceNumbers.NO_OPS_PERFORMED, SequenceNumbers.UNASSIGNED_SEQ_NO), + equalTo(SequenceNumbers.UNASSIGNED_SEQ_NO)); + assertThat(SequenceNumbers.min(SequenceNumbers.UNASSIGNED_SEQ_NO, seqNo), equalTo(seqNo)); final long minSeqNo = randomNonNegativeLong(); assertThat(SequenceNumbers.min(minSeqNo, seqNo), equalTo(Math.min(minSeqNo, seqNo))); final IllegalArgumentException e = - expectThrows(IllegalArgumentException.class, () -> SequenceNumbers.min(minSeqNo, SequenceNumbersService.UNASSIGNED_SEQ_NO)); + expectThrows(IllegalArgumentException.class, () -> SequenceNumbers.min(minSeqNo, SequenceNumbers.UNASSIGNED_SEQ_NO)); assertThat(e, hasToString(containsString("sequence number must be assigned"))); } public void testMax() { final long seqNo = randomNonNegativeLong(); - assertThat(SequenceNumbers.max(SequenceNumbersService.NO_OPS_PERFORMED, seqNo), equalTo(seqNo)); + assertThat(SequenceNumbers.max(SequenceNumbers.NO_OPS_PERFORMED, seqNo), equalTo(seqNo)); assertThat( - SequenceNumbers.max(SequenceNumbersService.NO_OPS_PERFORMED, SequenceNumbersService.UNASSIGNED_SEQ_NO), - equalTo(SequenceNumbersService.UNASSIGNED_SEQ_NO)); - assertThat(SequenceNumbers.max(SequenceNumbersService.UNASSIGNED_SEQ_NO, seqNo), equalTo(seqNo)); + SequenceNumbers.max(SequenceNumbers.NO_OPS_PERFORMED, SequenceNumbers.UNASSIGNED_SEQ_NO), + equalTo(SequenceNumbers.UNASSIGNED_SEQ_NO)); + assertThat(SequenceNumbers.max(SequenceNumbers.UNASSIGNED_SEQ_NO, seqNo), equalTo(seqNo)); final long maxSeqNo = randomNonNegativeLong(); assertThat(SequenceNumbers.min(maxSeqNo, seqNo), equalTo(Math.min(maxSeqNo, 
seqNo))); final IllegalArgumentException e = - expectThrows(IllegalArgumentException.class, () -> SequenceNumbers.min(maxSeqNo, SequenceNumbersService.UNASSIGNED_SEQ_NO)); + expectThrows(IllegalArgumentException.class, () -> SequenceNumbers.min(maxSeqNo, SequenceNumbers.UNASSIGNED_SEQ_NO)); assertThat(e, hasToString(containsString("sequence number must be assigned"))); } diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 7e140dd35fe9f..b275da702b083 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -84,7 +84,7 @@ import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.mapper.Uid; -import org.elasticsearch.index.seqno.SequenceNumbersService; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.Translog; @@ -282,7 +282,7 @@ public void testClosesPreventsNewOperations() throws InterruptedException, Execu // expected } try { - indexShard.acquireReplicaOperationPermit(indexShard.getPrimaryTerm(), SequenceNumbersService.UNASSIGNED_SEQ_NO, null, + indexShard.acquireReplicaOperationPermit(indexShard.getPrimaryTerm(), SequenceNumbers.UNASSIGNED_SEQ_NO, null, ThreadPool.Names.INDEX); fail("we should not be able to increment anymore"); } catch (IndexShardClosedException e) { @@ -294,7 +294,7 @@ public void testRejectOperationPermitWithHigherTermWhenNotStarted() throws IOExc IndexShard indexShard = newShard(false); expectThrows(IndexShardNotStartedException.class, () -> indexShard.acquireReplicaOperationPermit(indexShard.getPrimaryTerm() + randomIntBetween(1, 100), - SequenceNumbersService.UNASSIGNED_SEQ_NO, null, 
ThreadPool.Names.INDEX)); + SequenceNumbers.UNASSIGNED_SEQ_NO, null, ThreadPool.Names.INDEX)); closeShards(indexShard); } @@ -414,7 +414,7 @@ public void testPrimaryFillsSeqNoGapsOnPromotion() throws Exception { // most of the time this is large enough that most of the time there will be at least one gap final int operations = 1024 - scaledRandomIntBetween(0, 1024); - final Result result = indexOnReplicaWithGaps(indexShard, operations, Math.toIntExact(SequenceNumbersService.NO_OPS_PERFORMED)); + final Result result = indexOnReplicaWithGaps(indexShard, operations, Math.toIntExact(SequenceNumbers.NO_OPS_PERFORMED)); final int maxSeqNo = result.maxSeqNo; final boolean gap = result.gap; @@ -592,7 +592,7 @@ public void onFailure(Exception e) { } }; - indexShard.acquireReplicaOperationPermit(primaryTerm - 1, SequenceNumbersService.UNASSIGNED_SEQ_NO, onLockAcquired, + indexShard.acquireReplicaOperationPermit(primaryTerm - 1, SequenceNumbers.UNASSIGNED_SEQ_NO, onLockAcquired, ThreadPool.Names.INDEX); assertFalse(onResponse.get()); @@ -608,12 +608,12 @@ public void onFailure(Exception e) { final CyclicBarrier barrier = new CyclicBarrier(2); final long newPrimaryTerm = primaryTerm + 1 + randomInt(20); if (engineClosed == false) { - assertThat(indexShard.getLocalCheckpoint(), equalTo(SequenceNumbersService.NO_OPS_PERFORMED)); - assertThat(indexShard.getGlobalCheckpoint(), equalTo(SequenceNumbersService.NO_OPS_PERFORMED)); + assertThat(indexShard.getLocalCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); + assertThat(indexShard.getGlobalCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); } final long newGlobalCheckPoint; if (engineClosed || randomBoolean()) { - newGlobalCheckPoint = SequenceNumbersService.NO_OPS_PERFORMED; + newGlobalCheckPoint = SequenceNumbers.NO_OPS_PERFORMED; } else { long localCheckPoint = indexShard.getGlobalCheckpoint() + randomInt(100); // advance local checkpoint @@ -623,8 +623,8 @@ public void onFailure(Exception e) { newGlobalCheckPoint 
= randomIntBetween((int) indexShard.getGlobalCheckpoint(), (int) localCheckPoint); } final long expectedLocalCheckpoint; - if (newGlobalCheckPoint == SequenceNumbersService.UNASSIGNED_SEQ_NO) { - expectedLocalCheckpoint = SequenceNumbersService.NO_OPS_PERFORMED; + if (newGlobalCheckPoint == SequenceNumbers.UNASSIGNED_SEQ_NO) { + expectedLocalCheckpoint = SequenceNumbers.NO_OPS_PERFORMED; } else { expectedLocalCheckpoint = newGlobalCheckPoint; } @@ -714,18 +714,18 @@ private void finish() { public void testRestoreLocalCheckpointTrackerFromTranslogOnPromotion() throws IOException, InterruptedException { final IndexShard indexShard = newStartedShard(false); final int operations = 1024 - scaledRandomIntBetween(0, 1024); - indexOnReplicaWithGaps(indexShard, operations, Math.toIntExact(SequenceNumbersService.NO_OPS_PERFORMED)); + indexOnReplicaWithGaps(indexShard, operations, Math.toIntExact(SequenceNumbers.NO_OPS_PERFORMED)); final long maxSeqNo = indexShard.seqNoStats().getMaxSeqNo(); - final long globalCheckpointOnReplica = SequenceNumbersService.UNASSIGNED_SEQ_NO; + final long globalCheckpointOnReplica = SequenceNumbers.UNASSIGNED_SEQ_NO; randomIntBetween( - Math.toIntExact(SequenceNumbersService.UNASSIGNED_SEQ_NO), + Math.toIntExact(SequenceNumbers.UNASSIGNED_SEQ_NO), Math.toIntExact(indexShard.getLocalCheckpoint())); indexShard.updateGlobalCheckpointOnReplica(globalCheckpointOnReplica, "test"); final int globalCheckpoint = randomIntBetween( - Math.toIntExact(SequenceNumbersService.UNASSIGNED_SEQ_NO), + Math.toIntExact(SequenceNumbers.UNASSIGNED_SEQ_NO), Math.toIntExact(indexShard.getLocalCheckpoint())); final CountDownLatch latch = new CountDownLatch(1); @@ -770,17 +770,17 @@ public void testThrowBackLocalCheckpointOnReplica() throws IOException, Interrup // most of the time this is large enough that most of the time there will be at least one gap final int operations = 1024 - scaledRandomIntBetween(0, 1024); - indexOnReplicaWithGaps(indexShard, operations, 
Math.toIntExact(SequenceNumbersService.NO_OPS_PERFORMED)); + indexOnReplicaWithGaps(indexShard, operations, Math.toIntExact(SequenceNumbers.NO_OPS_PERFORMED)); final long globalCheckpointOnReplica = randomIntBetween( - Math.toIntExact(SequenceNumbersService.UNASSIGNED_SEQ_NO), + Math.toIntExact(SequenceNumbers.UNASSIGNED_SEQ_NO), Math.toIntExact(indexShard.getLocalCheckpoint())); indexShard.updateGlobalCheckpointOnReplica(globalCheckpointOnReplica, "test"); final int globalCheckpoint = randomIntBetween( - Math.toIntExact(SequenceNumbersService.UNASSIGNED_SEQ_NO), + Math.toIntExact(SequenceNumbers.UNASSIGNED_SEQ_NO), Math.toIntExact(indexShard.getLocalCheckpoint())); final CountDownLatch latch = new CountDownLatch(1); indexShard.acquireReplicaOperationPermit( @@ -801,9 +801,9 @@ public void onFailure(final Exception e) { ThreadPool.Names.SAME); latch.await(); - if (globalCheckpointOnReplica == SequenceNumbersService.UNASSIGNED_SEQ_NO - && globalCheckpoint == SequenceNumbersService.UNASSIGNED_SEQ_NO) { - assertThat(indexShard.getLocalCheckpoint(), equalTo(SequenceNumbersService.NO_OPS_PERFORMED)); + if (globalCheckpointOnReplica == SequenceNumbers.UNASSIGNED_SEQ_NO + && globalCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO) { + assertThat(indexShard.getLocalCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); } else { assertThat(indexShard.getLocalCheckpoint(), equalTo(Math.max(globalCheckpoint, globalCheckpointOnReplica))); } diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java index 91ea9c6073a4e..f3bf76c57a550 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java @@ -24,7 +24,7 @@ import org.elasticsearch.index.engine.InternalEngineTests; import org.elasticsearch.index.mapper.ParsedDocument; 
import org.elasticsearch.index.mapper.Uid; -import org.elasticsearch.index.seqno.SequenceNumbersService; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; @@ -138,7 +138,7 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Exception ex) { ParsedDocument doc = InternalEngineTests.createParsedDoc("1", null); Engine.Delete delete = new Engine.Delete("test", "1", new Term("_uid", Uid.createUidAsBytes(doc.type(), doc.id()))); Engine.Index index = new Engine.Index(new Term("_uid", Uid.createUidAsBytes(doc.type(), doc.id())), doc); - compositeListener.postDelete(randomShardId, delete, new Engine.DeleteResult(1, SequenceNumbersService.UNASSIGNED_SEQ_NO, true)); + compositeListener.postDelete(randomShardId, delete, new Engine.DeleteResult(1, SequenceNumbers.UNASSIGNED_SEQ_NO, true)); assertEquals(0, preIndex.get()); assertEquals(0, postIndex.get()); assertEquals(0, postIndexException.get()); @@ -162,7 +162,7 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Exception ex) { assertEquals(2, postDelete.get()); assertEquals(2, postDeleteException.get()); - compositeListener.postIndex(randomShardId, index, new Engine.IndexResult(0, SequenceNumbersService.UNASSIGNED_SEQ_NO, false)); + compositeListener.postIndex(randomShardId, index, new Engine.IndexResult(0, SequenceNumbers.UNASSIGNED_SEQ_NO, false)); assertEquals(0, preIndex.get()); assertEquals(2, postIndex.get()); assertEquals(0, postIndexException.get()); diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 59e2dd8221853..771302a903f32 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -34,7 +34,6 @@ import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LineFileDocs; import 
org.apache.lucene.util.LuceneTestCase; -import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.bytes.BytesArray; @@ -64,7 +63,7 @@ import org.elasticsearch.index.mapper.UidFieldMapper; import org.elasticsearch.index.seqno.LocalCheckpointTracker; import org.elasticsearch.index.seqno.LocalCheckpointTrackerTests; -import org.elasticsearch.index.seqno.SequenceNumbersService; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog.Location; import org.elasticsearch.test.ESTestCase; @@ -146,7 +145,7 @@ protected void afterIfSuccessful() throws Exception { protected Translog createTranslog(TranslogConfig config, String translogUUID) throws IOException { return new Translog(config, translogUUID, createTranslogDeletionPolicy(config.getIndexSettings()), - () -> SequenceNumbersService.UNASSIGNED_SEQ_NO); + () -> SequenceNumbers.UNASSIGNED_SEQ_NO); } private void markCurrentGenAsCommitted(Translog translog) throws IOException { @@ -188,7 +187,7 @@ public void tearDown() throws Exception { } private Translog create(Path path) throws IOException { - globalCheckpoint = new AtomicLong(SequenceNumbersService.UNASSIGNED_SEQ_NO); + globalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO); final TranslogConfig translogConfig = getTranslogConfig(path); final TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(translogConfig.getIndexSettings()); return new Translog(translogConfig, null, deletionPolicy, () -> globalCheckpoint.get()); @@ -771,7 +770,7 @@ public void testConcurrentWriteViewsAndSnapshot() throws Throwable { final AtomicBoolean run = new AtomicBoolean(true); final Object flushMutex = new Object(); - final AtomicLong lastCommittedLocalCheckpoint = new AtomicLong(SequenceNumbersService.NO_OPS_PERFORMED); + final AtomicLong 
lastCommittedLocalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); final LocalCheckpointTracker tracker = LocalCheckpointTrackerTests.createEmptyTracker(); final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); // any errors on threads @@ -1102,10 +1101,10 @@ public void testTranslogWriter() throws IOException { out.writeInt(i); long seqNo; do { - seqNo = opsHaveValidSequenceNumbers ? randomNonNegativeLong() : SequenceNumbersService.UNASSIGNED_SEQ_NO; + seqNo = opsHaveValidSequenceNumbers ? randomNonNegativeLong() : SequenceNumbers.UNASSIGNED_SEQ_NO; opsHaveValidSequenceNumbers = opsHaveValidSequenceNumbers || !rarely(); } while (seenSeqNos.contains(seqNo)); - if (seqNo != SequenceNumbersService.UNASSIGNED_SEQ_NO) { + if (seqNo != SequenceNumbers.UNASSIGNED_SEQ_NO) { seenSeqNos.add(seqNo); } writer.add(new BytesArray(bytes), seqNo); @@ -1120,8 +1119,8 @@ public void testTranslogWriter() throws IOException { final int value = buffer.getInt(); assertEquals(i, value); } - final long minSeqNo = seenSeqNos.stream().min(Long::compareTo).orElse(SequenceNumbersService.NO_OPS_PERFORMED); - final long maxSeqNo = seenSeqNos.stream().max(Long::compareTo).orElse(SequenceNumbersService.NO_OPS_PERFORMED); + final long minSeqNo = seenSeqNos.stream().min(Long::compareTo).orElse(SequenceNumbers.NO_OPS_PERFORMED); + final long maxSeqNo = seenSeqNos.stream().max(Long::compareTo).orElse(SequenceNumbers.NO_OPS_PERFORMED); assertThat(reader.getCheckpoint().minSeqNo, equalTo(minSeqNo)); assertThat(reader.getCheckpoint().maxSeqNo, equalTo(maxSeqNo)); @@ -1211,7 +1210,7 @@ public void testBasicRecovery() throws IOException { assertNull(snapshot.next()); } } else { - translog = new Translog(config, translogGeneration.translogUUID, translog.getDeletionPolicy(), () -> SequenceNumbersService.UNASSIGNED_SEQ_NO); + translog = new Translog(config, translogGeneration.translogUUID, translog.getDeletionPolicy(), () -> SequenceNumbers.UNASSIGNED_SEQ_NO); 
assertEquals("lastCommitted must be 1 less than current", translogGeneration.translogFileGeneration + 1, translog.currentFileGeneration()); assertFalse(translog.syncNeeded()); try (Translog.Snapshot snapshot = translog.newSnapshotFromGen(translogGeneration.translogFileGeneration)) { @@ -1249,7 +1248,7 @@ public void testRecoveryUncommitted() throws IOException { TranslogConfig config = translog.getConfig(); final String translogUUID = translog.getTranslogUUID(); final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); - try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO)) { + try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO)) { assertNotNull(translogGeneration); assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration()); assertFalse(translog.syncNeeded()); @@ -1263,7 +1262,7 @@ public void testRecoveryUncommitted() throws IOException { } } if (randomBoolean()) { // recover twice - try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO)) { + try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO)) { assertNotNull(translogGeneration); assertEquals("lastCommitted must be 3 less than current - we never finished the commit and run recovery twice", translogGeneration.translogFileGeneration + 3, translog.currentFileGeneration()); assertFalse(translog.syncNeeded()); @@ -1307,7 +1306,7 @@ public void testRecoveryUncommittedFileExists() throws IOException { final String translogUUID = translog.getTranslogUUID(); final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); - try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> 
SequenceNumbersService.UNASSIGNED_SEQ_NO)) { + try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO)) { assertNotNull(translogGeneration); assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration()); assertFalse(translog.syncNeeded()); @@ -1322,7 +1321,7 @@ public void testRecoveryUncommittedFileExists() throws IOException { } if (randomBoolean()) { // recover twice - try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO)) { + try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO)) { assertNotNull(translogGeneration); assertEquals("lastCommitted must be 3 less than current - we never finished the commit and run recovery twice", translogGeneration.translogFileGeneration + 3, translog.currentFileGeneration()); assertFalse(translog.syncNeeded()); @@ -1359,11 +1358,11 @@ public void testRecoveryUncommittedCorruptedCheckpoint() throws IOException { TranslogConfig config = translog.getConfig(); Path ckp = config.getTranslogPath().resolve(Translog.CHECKPOINT_FILE_NAME); Checkpoint read = Checkpoint.read(ckp); - Checkpoint corrupted = Checkpoint.emptyTranslogCheckpoint(0, 0, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0); + Checkpoint corrupted = Checkpoint.emptyTranslogCheckpoint(0, 0, SequenceNumbers.UNASSIGNED_SEQ_NO, 0); Checkpoint.write(FileChannel::open, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)), corrupted, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW); final String translogUUID = translog.getTranslogUUID(); final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); - try (Translog ignored = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO)) { + try 
(Translog ignored = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO)) { fail("corrupted"); } catch (IllegalStateException ex) { assertEquals("Checkpoint file translog-2.ckp already exists but has corrupted content expected: Checkpoint{offset=3123, " + @@ -1371,7 +1370,7 @@ public void testRecoveryUncommittedCorruptedCheckpoint() throws IOException { "generation=0, minSeqNo=-1, maxSeqNo=-1, globalCheckpoint=-2, minTranslogGeneration=0}", ex.getMessage()); } Checkpoint.write(FileChannel::open, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)), read, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING); - try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO)) { + try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO)) { assertNotNull(translogGeneration); assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration()); assertFalse(translog.syncNeeded()); @@ -1448,12 +1447,12 @@ public void testOpenForeignTranslog() throws IOException { final String foreignTranslog = randomRealisticUnicodeOfCodepointLengthBetween(1, translogGeneration.translogUUID.length()); try { - new Translog(config, foreignTranslog, createTranslogDeletionPolicy(), () -> SequenceNumbersService.UNASSIGNED_SEQ_NO); + new Translog(config, foreignTranslog, createTranslogDeletionPolicy(), () -> SequenceNumbers.UNASSIGNED_SEQ_NO); fail("translog doesn't belong to this UUID"); } catch (TranslogCorruptedException ex) { } - this.translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO); + this.translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO); try (Translog.Snapshot snapshot = 
this.translog.newSnapshotFromGen(translogGeneration.translogFileGeneration)) { for (int i = firstUncommitted; i < translogOperations; i++) { Translog.Operation next = snapshot.next(); @@ -1639,7 +1638,7 @@ public void testFailFlush() throws IOException { translog.close(); // we are closed final String translogUUID = translog.getTranslogUUID(); final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); - try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO)) { + try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO)) { assertEquals("lastCommitted must be 1 less than current", translogGeneration.translogFileGeneration + 1, tlog.currentFileGeneration()); assertFalse(tlog.syncNeeded()); @@ -1775,7 +1774,7 @@ protected void afterAdd() throws IOException { } } try (Translog tlog = - new Translog(config, translogUUID, createTranslogDeletionPolicy(), () -> SequenceNumbersService.UNASSIGNED_SEQ_NO); + new Translog(config, translogUUID, createTranslogDeletionPolicy(), () -> SequenceNumbers.UNASSIGNED_SEQ_NO); Translog.Snapshot snapshot = tlog.newSnapshot()) { if (writtenOperations.size() != snapshot.totalOperations()) { for (int i = 0; i < threadCount; i++) { @@ -1820,7 +1819,7 @@ public void testRecoveryFromAFutureGenerationCleansUp() throws IOException { TranslogConfig config = translog.getConfig(); final TranslogDeletionPolicy deletionPolicy = new TranslogDeletionPolicy(-1, -1); deletionPolicy.setMinTranslogGenerationForRecovery(comittedGeneration); - translog = new Translog(config, translog.getTranslogUUID(), deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO); + translog = new Translog(config, translog.getTranslogUUID(), deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO); assertThat(translog.getMinFileGeneration(), equalTo(1L)); // no trimming done yet, just recovered for (long gen = 1; gen < 
translog.currentFileGeneration(); gen++) { @@ -1874,7 +1873,7 @@ public void testRecoveryFromFailureOnTrimming() throws IOException { } final TranslogDeletionPolicy deletionPolicy = new TranslogDeletionPolicy(-1, -1); deletionPolicy.setMinTranslogGenerationForRecovery(comittedGeneration); - try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO)) { + try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO)) { // we don't know when things broke exactly assertThat(translog.getMinFileGeneration(), greaterThanOrEqualTo(1L)); assertThat(translog.getMinFileGeneration(), lessThanOrEqualTo(comittedGeneration)); @@ -1922,7 +1921,7 @@ public void onceFailedFailAlways() { private Translog getFailableTranslog(final FailSwitch fail, final TranslogConfig config, final boolean partialWrites, final boolean throwUnknownException, String translogUUID, final TranslogDeletionPolicy deletionPolicy) throws IOException { - return new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO) { + return new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO) { @Override ChannelFactory getChannelFactory() { final ChannelFactory factory = super.getChannelFactory(); @@ -2048,7 +2047,7 @@ public void testFailWhileCreateWriteWithRecoveredTLogs() throws IOException { translog.add(new Translog.Index("test", "boom", 0, "boom".getBytes(Charset.forName("UTF-8")))); translog.close(); try { - new Translog(config, translog.getTranslogUUID(), createTranslogDeletionPolicy(), () -> SequenceNumbersService.UNASSIGNED_SEQ_NO) { + new Translog(config, translog.getTranslogUUID(), createTranslogDeletionPolicy(), () -> SequenceNumbers.UNASSIGNED_SEQ_NO) { @Override protected TranslogWriter createWriter(long fileGeneration) throws IOException { throw new MockDirectoryWrapper.FakeIOException(); @@ -2101,7 +2100,7 @@ 
public void testRecoverWithUnbackedNextGenInIllegalState() throws IOException { Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 1) + ".tlog")); try { - Translog tlog = new Translog(config, translog.getTranslogUUID(), translog.getDeletionPolicy(), () -> SequenceNumbersService.UNASSIGNED_SEQ_NO); + Translog tlog = new Translog(config, translog.getTranslogUUID(), translog.getDeletionPolicy(), () -> SequenceNumbers.UNASSIGNED_SEQ_NO); fail("file already exists?"); } catch (TranslogException ex) { // all is well @@ -2123,7 +2122,7 @@ public void testRecoverWithUnbackedNextGenAndFutureFile() throws IOException { Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 1) + ".tlog")); // we add N+1 and N+2 to ensure we only delete the N+1 file and never jump ahead and wipe without the right condition Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 2) + ".tlog")); - try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO)) { + try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO)) { assertFalse(tlog.syncNeeded()); try (Translog.Snapshot snapshot = tlog.newSnapshot()) { for (int i = 0; i < 1; i++) { @@ -2136,7 +2135,7 @@ public void testRecoverWithUnbackedNextGenAndFutureFile() throws IOException { } try { - Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO); + Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO); fail("file already exists?"); } catch (TranslogException ex) { // all is well @@ -2239,8 +2238,8 @@ public void testWithRandomException() throws IOException { fail.failNever(); // we don't wanna fail here but we might since we write a new checkpoint and create a new tlog file TranslogDeletionPolicy deletionPolicy = 
createTranslogDeletionPolicy(); deletionPolicy.setMinTranslogGenerationForRecovery(minGenForRecovery); - try (Translog translog = new Translog(config, generationUUID, deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO); - Translog.Snapshot snapshot = translog.newSnapshotFromGen(minGenForRecovery)) { + try (Translog translog = new Translog(config, generationUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO); + Translog.Snapshot snapshot = translog.newSnapshotFromGen(minGenForRecovery)) { assertEquals(syncedDocs.size(), snapshot.totalOperations()); for (int i = 0; i < syncedDocs.size(); i++) { Translog.Operation next = snapshot.next(); @@ -2304,14 +2303,14 @@ public void testPendingDelete() throws IOException { final String translogUUID = translog.getTranslogUUID(); final TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(config.getIndexSettings()); translog.close(); - translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO); + translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO); translog.add(new Translog.Index("test", "2", 1, new byte[]{2})); translog.rollGeneration(); Closeable lock = translog.acquireRetentionLock(); translog.add(new Translog.Index("test", "3", 2, new byte[]{3})); translog.close(); IOUtils.close(lock); - translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO); + translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO); } public static Translog.Location randomTranslogLocation() { diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogVersionTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogVersionTests.java index f6aafe765f56f..46761698610a5 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogVersionTests.java +++ 
b/core/src/test/java/org/elasticsearch/index/translog/TranslogVersionTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.translog; -import org.elasticsearch.index.seqno.SequenceNumbersService; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -86,10 +86,10 @@ public void testTruncatedTranslog() throws Exception { public TranslogReader openReader(final Path path, final long id) throws IOException { try (FileChannel channel = FileChannel.open(path, StandardOpenOption.READ)) { - final long minSeqNo = SequenceNumbersService.NO_OPS_PERFORMED; - final long maxSeqNo = SequenceNumbersService.NO_OPS_PERFORMED; + final long minSeqNo = SequenceNumbers.NO_OPS_PERFORMED; + final long maxSeqNo = SequenceNumbers.NO_OPS_PERFORMED; final Checkpoint checkpoint = - new Checkpoint(Files.size(path), 1, id, minSeqNo, maxSeqNo, SequenceNumbersService.UNASSIGNED_SEQ_NO, id); + new Checkpoint(Files.size(path), 1, id, minSeqNo, maxSeqNo, SequenceNumbers.UNASSIGNED_SEQ_NO, id); return TranslogReader.open(channel, path, checkpoint, null); } } diff --git a/core/src/test/java/org/elasticsearch/indexing/IndexActionIT.java b/core/src/test/java/org/elasticsearch/indexing/IndexActionIT.java index 63f889179a27c..7abb603b8eb2d 100644 --- a/core/src/test/java/org/elasticsearch/indexing/IndexActionIT.java +++ b/core/src/test/java/org/elasticsearch/indexing/IndexActionIT.java @@ -250,6 +250,6 @@ public void testDocumentWithBlankFieldName() { ); assertThat(e.getMessage(), containsString("failed to parse")); assertThat(e.getRootCause().getMessage(), - containsString("object field starting or ending with a [.] 
makes object resolution ambiguous: []")); + containsString("field name cannot be an empty string")); } } diff --git a/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java index feff696e94201..b1725ead326d9 100644 --- a/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java +++ b/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java @@ -75,7 +75,6 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.shard.IndexEventListener; -import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.test.gateway.TestGatewayAllocator; import org.elasticsearch.threadpool.ThreadPool; @@ -87,7 +86,6 @@ import java.util.Collections; import java.util.HashSet; import java.util.List; -import java.util.function.Consumer; import java.util.stream.Collectors; import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom; diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java index 437d1eaa84150..f876f6bf80dbc 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java @@ -25,7 +25,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.SourceToParse; -import org.elasticsearch.index.seqno.SequenceNumbersService; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardTestCase; import org.elasticsearch.index.shard.ShardId; @@ -73,7 +73,7 @@ Path 
translogLocation() { translogLocation.set(replica.getTranslog().location()); - assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(SequenceNumbersService.UNASSIGNED_SEQ_NO)); + assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(SequenceNumbers.UNASSIGNED_SEQ_NO)); final Translog translog = replica.getTranslog(); translogLocation.set( @@ -87,7 +87,7 @@ Path translogLocation() { translogLocation.set(replica.getTranslog().location()); // commit is not good, global checkpoint is below max - assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(SequenceNumbersService.UNASSIGNED_SEQ_NO)); + assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(SequenceNumbers.UNASSIGNED_SEQ_NO)); translogLocation.set( writeTranslog(replica.shardId(), translog.getTranslogUUID(), translog.currentFileGeneration(), maxSeqNo)); diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index 4f1a2364d184b..9f280839e8638 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -56,7 +56,7 @@ import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.mapper.UidFieldMapper; import org.elasticsearch.index.seqno.SeqNoStats; -import org.elasticsearch.index.seqno.SequenceNumbersService; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardRelocatedException; import org.elasticsearch.index.shard.IndexShardState; @@ -106,7 +106,7 @@ public void testSendFiles() throws Throwable { null, randomBoolean(), randomNonNegativeLong(), - randomBoolean() ? 
SequenceNumbersService.UNASSIGNED_SEQ_NO : randomNonNegativeLong()); + randomBoolean() ? SequenceNumbers.UNASSIGNED_SEQ_NO : randomNonNegativeLong()); Store store = newStore(createTempDir()); RecoverySourceHandler handler = new RecoverySourceHandler(null, null, request, recoverySettings.getChunkSize().bytesAsInt(), Settings.EMPTY); @@ -154,7 +154,7 @@ public void close() throws IOException { public void testSendSnapshotSendsOps() throws IOException { final RecoverySettings recoverySettings = new RecoverySettings(Settings.EMPTY, service); final int fileChunkSizeInBytes = recoverySettings.getChunkSize().bytesAsInt(); - final long startingSeqNo = randomBoolean() ? SequenceNumbersService.UNASSIGNED_SEQ_NO : randomIntBetween(0, 16); + final long startingSeqNo = randomBoolean() ? SequenceNumbers.UNASSIGNED_SEQ_NO : randomIntBetween(0, 16); final StartRecoveryRequest request = new StartRecoveryRequest( shardId, null, @@ -163,7 +163,7 @@ public void testSendSnapshotSendsOps() throws IOException { null, randomBoolean(), randomNonNegativeLong(), - randomBoolean() ? SequenceNumbersService.UNASSIGNED_SEQ_NO : randomNonNegativeLong()); + randomBoolean() ? 
SequenceNumbers.UNASSIGNED_SEQ_NO : randomNonNegativeLong()); final IndexShard shard = mock(IndexShard.class); when(shard.state()).thenReturn(IndexShardState.STARTED); final RecoveryTargetHandler recoveryTarget = mock(RecoveryTargetHandler.class); @@ -173,7 +173,7 @@ public void testSendSnapshotSendsOps() throws IOException { final int initialNumberOfDocs = randomIntBetween(16, 64); for (int i = 0; i < initialNumberOfDocs; i++) { final Engine.Index index = getIndex(Integer.toString(i)); - operations.add(new Translog.Index(index, new Engine.IndexResult(1, SequenceNumbersService.UNASSIGNED_SEQ_NO, true))); + operations.add(new Translog.Index(index, new Engine.IndexResult(1, SequenceNumbers.UNASSIGNED_SEQ_NO, true))); } final int numberOfDocsWithValidSequenceNumbers = randomIntBetween(16, 64); for (int i = initialNumberOfDocs; i < initialNumberOfDocs + numberOfDocsWithValidSequenceNumbers; i++) { @@ -199,7 +199,7 @@ public Translog.Operation next() throws IOException { return operations.get(counter++); } }); - if (startingSeqNo == SequenceNumbersService.UNASSIGNED_SEQ_NO) { + if (startingSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO) { assertThat(result.totalOperations, equalTo(initialNumberOfDocs + numberOfDocsWithValidSequenceNumbers)); } else { assertThat(result.totalOperations, equalTo(Math.toIntExact(numberOfDocsWithValidSequenceNumbers - startingSeqNo))); @@ -237,7 +237,7 @@ public void testHandleCorruptedIndexOnSendSendFiles() throws Throwable { null, randomBoolean(), randomNonNegativeLong(), - randomBoolean() ? SequenceNumbersService.UNASSIGNED_SEQ_NO : 0L); + randomBoolean() ? SequenceNumbers.UNASSIGNED_SEQ_NO : 0L); Path tempDir = createTempDir(); Store store = newStore(tempDir, false); AtomicBoolean failedEngine = new AtomicBoolean(false); @@ -307,7 +307,7 @@ public void testHandleExceptinoOnSendSendFiles() throws Throwable { null, randomBoolean(), randomNonNegativeLong(), - randomBoolean() ? SequenceNumbersService.UNASSIGNED_SEQ_NO : 0L); + randomBoolean() ? 
SequenceNumbers.UNASSIGNED_SEQ_NO : 0L); Path tempDir = createTempDir(); Store store = newStore(tempDir, false); AtomicBoolean failedEngine = new AtomicBoolean(false); @@ -373,7 +373,7 @@ public void testThrowExceptionOnPrimaryRelocatedBeforePhase1Started() throws IOE null, false, randomNonNegativeLong(), - attemptSequenceNumberBasedRecovery ? randomNonNegativeLong() : SequenceNumbersService.UNASSIGNED_SEQ_NO); + attemptSequenceNumberBasedRecovery ? randomNonNegativeLong() : SequenceNumbers.UNASSIGNED_SEQ_NO); final IndexShard shard = mock(IndexShard.class); when(shard.seqNoStats()).thenReturn(mock(SeqNoStats.class)); when(shard.segmentStats(anyBoolean())).thenReturn(mock(SegmentsStats.class)); @@ -412,7 +412,7 @@ void prepareTargetForTranslog(final int totalTranslogOps) throws IOException { @Override long phase2(long startingSeqNo, Translog.Snapshot snapshot) throws IOException { phase2Called.set(true); - return SequenceNumbersService.UNASSIGNED_SEQ_NO; + return SequenceNumbers.UNASSIGNED_SEQ_NO; } }; diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java index 85a9ee10208c7..b478243392e1b 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java @@ -24,7 +24,7 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; -import org.elasticsearch.index.seqno.SequenceNumbersService; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.Store; import org.elasticsearch.test.ESTestCase; @@ -49,7 +49,7 @@ public void testSerialization() throws Exception { Store.MetadataSnapshot.EMPTY, randomBoolean(), 
randomNonNegativeLong(), - randomBoolean() ? SequenceNumbersService.UNASSIGNED_SEQ_NO : randomNonNegativeLong()); + randomBoolean() ? SequenceNumbers.UNASSIGNED_SEQ_NO : randomNonNegativeLong()); final ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); final OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer); @@ -72,7 +72,7 @@ public void testSerialization() throws Exception { if (targetNodeVersion.onOrAfter(Version.V_6_0_0_alpha1)) { assertThat(outRequest.startingSeqNo(), equalTo(inRequest.startingSeqNo())); } else { - assertThat(SequenceNumbersService.UNASSIGNED_SEQ_NO, equalTo(inRequest.startingSeqNo())); + assertThat(SequenceNumbers.UNASSIGNED_SEQ_NO, equalTo(inRequest.startingSeqNo())); } } diff --git a/core/src/test/java/org/elasticsearch/indices/settings/GetSettingsBlocksIT.java b/core/src/test/java/org/elasticsearch/indices/settings/GetSettingsBlocksIT.java index cb45a639c07eb..3d9b2aab7ad16 100644 --- a/core/src/test/java/org/elasticsearch/indices/settings/GetSettingsBlocksIT.java +++ b/core/src/test/java/org/elasticsearch/indices/settings/GetSettingsBlocksIT.java @@ -21,6 +21,7 @@ import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.test.ESIntegTestCase; @@ -42,7 +43,7 @@ public void testGetSettingsWithBlocks() throws Exception { .setSettings(Settings.builder() .put("index.refresh_interval", -1) .put("index.merge.policy.expunge_deletes_allowed", "30") - .put(MapperService.INDEX_MAPPER_DYNAMIC_SETTING.getKey(), false))); + .put(FieldMapper.IGNORE_MALFORMED_SETTING.getKey(), false))); for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY, SETTING_READ_ONLY_ALLOW_DELETE)) { try { @@ -51,7 +52,7 @@ public void testGetSettingsWithBlocks() throws Exception { 
assertThat(response.getIndexToSettings().size(), greaterThanOrEqualTo(1)); assertThat(response.getSetting("test", "index.refresh_interval"), equalTo("-1")); assertThat(response.getSetting("test", "index.merge.policy.expunge_deletes_allowed"), equalTo("30")); - assertThat(response.getSetting("test", MapperService.INDEX_MAPPER_DYNAMIC_SETTING.getKey()), equalTo("false")); + assertThat(response.getSetting("test", FieldMapper.IGNORE_MALFORMED_SETTING.getKey()), equalTo("false")); } finally { disableIndexBlock("test", block); } diff --git a/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java b/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java index 5aa9ddc453323..2da49af12be5f 100644 --- a/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java +++ b/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java @@ -107,7 +107,7 @@ public void testUnassignedShardAndEmptyNodesInRoutingTable() throws Exception { .nodes(DiscoveryNodes.EMPTY_NODES) .build(), false ); - RoutingAllocation routingAllocation = new RoutingAllocation(allocationDeciders, routingNodes, current, ClusterInfo.EMPTY, System.nanoTime(), false); + RoutingAllocation routingAllocation = new RoutingAllocation(allocationDeciders, routingNodes, current, ClusterInfo.EMPTY, System.nanoTime()); allocator.allocateUnassigned(routingAllocation); } diff --git a/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java b/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java index 48f6fdeaedbbd..4eb4f9606aea1 100644 --- a/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java +++ b/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java @@ -45,7 +45,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.seqno.SeqNoStats; -import org.elasticsearch.index.seqno.SequenceNumbersService; +import 
org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardState; @@ -119,7 +119,7 @@ protected void beforeIndexDeletion() throws Exception { ShardStats primary = maybePrimary.get(); final SeqNoStats primarySeqNoStats = primary.getSeqNoStats(); assertThat(primary.getShardRouting() + " should have set the global checkpoint", - primarySeqNoStats.getGlobalCheckpoint(), not(equalTo(SequenceNumbersService.UNASSIGNED_SEQ_NO))); + primarySeqNoStats.getGlobalCheckpoint(), not(equalTo(SequenceNumbers.UNASSIGNED_SEQ_NO))); for (ShardStats shardStats : indexShardStats) { final SeqNoStats seqNoStats = shardStats.getSeqNoStats(); assertThat(shardStats.getShardRouting() + " local checkpoint mismatch", diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java index c43924728a2a4..3822455b83c3a 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; @@ -942,7 +943,10 @@ public void testTopHitsInNested() throws Exception { } } - public void testDontExplode() throws Exception { + public void testUseMaxDocInsteadOfSize() throws Exception { + client().admin().indices().prepareUpdateSettings("idx") + .setSettings(Collections.singletonMap(IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING.getKey(), ArrayUtil.MAX_ARRAY_LENGTH)) + .get(); 
SearchResponse response = client() .prepareSearch("idx") .addAggregation(terms("terms") @@ -954,6 +958,67 @@ public void testDontExplode() throws Exception { ) .get(); assertNoFailures(response); + client().admin().indices().prepareUpdateSettings("idx") + .setSettings(Collections.singletonMap(IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING.getKey(), null)) + .get(); + } + + public void testTooHighResultWindow() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation(terms("terms") + .executionHint(randomExecutionHint()) + .field(TERMS_AGGS_FIELD) + .subAggregation( + topHits("hits").from(50).size(10).sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC)) + ) + ) + .get(); + assertNoFailures(response); + + Exception e = expectThrows(SearchPhaseExecutionException.class, () -> client().prepareSearch("idx") + .addAggregation(terms("terms") + .executionHint(randomExecutionHint()) + .field(TERMS_AGGS_FIELD) + .subAggregation( + topHits("hits").from(100).size(10).sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC)) + ) + ).get()); + assertThat(e.getCause().getMessage(), + containsString("the top hits aggregator [hits]'s from + size must be less than or equal to: [100] but was [110]")); + e = expectThrows(SearchPhaseExecutionException.class, () -> client().prepareSearch("idx") + .addAggregation(terms("terms") + .executionHint(randomExecutionHint()) + .field(TERMS_AGGS_FIELD) + .subAggregation( + topHits("hits").from(10).size(100).sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC)) + ) + ).get()); + assertThat(e.getCause().getMessage(), + containsString("the top hits aggregator [hits]'s from + size must be less than or equal to: [100] but was [110]")); + + client().admin().indices().prepareUpdateSettings("idx") + .setSettings(Collections.singletonMap(IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING.getKey(), 110)) + .get(); + response = client().prepareSearch("idx") + .addAggregation(terms("terms") + 
.executionHint(randomExecutionHint()) + .field(TERMS_AGGS_FIELD) + .subAggregation( + topHits("hits").from(100).size(10).sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC)) + )).get(); + assertNoFailures(response); + response = client().prepareSearch("idx") + .addAggregation(terms("terms") + .executionHint(randomExecutionHint()) + .field(TERMS_AGGS_FIELD) + .subAggregation( + topHits("hits").from(10).size(100).sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC)) + )).get(); + assertNoFailures(response); + client().admin().indices().prepareUpdateSettings("idx") + .setSettings(Collections.singletonMap(IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING.getKey(), null)) + .get(); } public void testNoStoredFields() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java b/core/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java index 55a424754d51f..079db8097f2b0 100644 --- a/core/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java +++ b/core/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java @@ -23,10 +23,12 @@ import org.apache.lucene.util.ArrayUtil; import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.InnerHitBuilder; import org.elasticsearch.index.query.QueryBuilder; @@ -629,8 +631,11 @@ public void testInnerHitsWithIgnoreUnmapped() throws Exception { assertSearchHits(response, "1", "3"); } - public void testDontExplode() throws Exception { + public void 
testUseMaxDocInsteadOfSize() throws Exception { assertAcked(prepareCreate("index2").addMapping("type", "nested", "type=nested")); + client().admin().indices().prepareUpdateSettings("index2") + .setSettings(Collections.singletonMap(IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING.getKey(), ArrayUtil.MAX_ARRAY_LENGTH)) + .get(); client().prepareIndex("index2", "type", "1").setSource(jsonBuilder().startObject() .startArray("nested") .startObject() @@ -650,4 +655,50 @@ public void testDontExplode() throws Exception { assertHitCount(response, 1); } + public void testTooHighResultWindow() throws Exception { + assertAcked(prepareCreate("index2").addMapping("type", "nested", "type=nested")); + client().prepareIndex("index2", "type", "1").setSource(jsonBuilder().startObject() + .startArray("nested") + .startObject() + .field("field", "value1") + .endObject() + .endArray() + .endObject()) + .setRefreshPolicy(IMMEDIATE) + .get(); + SearchResponse response = client().prepareSearch("index2") + .setQuery(nestedQuery("nested", matchQuery("nested.field", "value1"), ScoreMode.Avg) + .innerHit(new InnerHitBuilder().setFrom(50).setSize(10).setName("_name"))) + .get(); + assertNoFailures(response); + assertHitCount(response, 1); + + Exception e = expectThrows(SearchPhaseExecutionException.class, () -> client().prepareSearch("index2") + .setQuery(nestedQuery("nested", matchQuery("nested.field", "value1"), ScoreMode.Avg) + .innerHit(new InnerHitBuilder().setFrom(100).setSize(10).setName("_name"))) + .get()); + assertThat(e.getCause().getMessage(), + containsString("the inner hit definition's [_name]'s from + size must be less than or equal to: [100] but was [110]")); + e = expectThrows(SearchPhaseExecutionException.class, () -> client().prepareSearch("index2") + .setQuery(nestedQuery("nested", matchQuery("nested.field", "value1"), ScoreMode.Avg) + .innerHit(new InnerHitBuilder().setFrom(10).setSize(100).setName("_name"))) + .get()); + assertThat(e.getCause().getMessage(), + 
containsString("the inner hit definition's [_name]'s from + size must be less than or equal to: [100] but was [110]")); + + client().admin().indices().prepareUpdateSettings("index2") + .setSettings(Collections.singletonMap(IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING.getKey(), 110)) + .get(); + response = client().prepareSearch("index2") + .setQuery(nestedQuery("nested", matchQuery("nested.field", "value1"), ScoreMode.Avg) + .innerHit(new InnerHitBuilder().setFrom(100).setSize(10).setName("_name"))) + .get(); + assertNoFailures(response); + response = client().prepareSearch("index2") + .setQuery(nestedQuery("nested", matchQuery("nested.field", "value1"), ScoreMode.Avg) + .innerHit(new InnerHitBuilder().setFrom(10).setSize(100).setName("_name"))) + .get(); + assertNoFailures(response); + } + } diff --git a/core/src/test/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java b/core/src/test/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java index 0cb6a52bbefef..ab23dfbe21928 100644 --- a/core/src/test/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java +++ b/core/src/test/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java @@ -38,6 +38,9 @@ import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.rescore.QueryRescoreMode; import org.elasticsearch.search.rescore.QueryRescorerBuilder; +import org.elasticsearch.search.sort.SortBuilder; +import org.elasticsearch.search.sort.SortBuilders; +import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESIntegTestCase; import java.util.Arrays; @@ -66,6 +69,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThirdHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasScore; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static 
org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -705,4 +709,45 @@ public void testFromSize() throws Exception { assertEquals(4, request.get().getHits().getHits().length); } + + public void testRescorePhaseWithInvalidSort() throws Exception { + assertAcked(prepareCreate("test")); + for(int i=0;i<5;i++) { + client().prepareIndex("test", "type", ""+i).setSource("number", 0).get(); + } + refresh(); + + Exception exc = expectThrows(Exception.class, + () -> client().prepareSearch() + .addSort(SortBuilders.fieldSort("number")) + .setTrackScores(true) + .addRescorer(new QueryRescorerBuilder(matchAllQuery()), 50) + .get() + ); + assertNotNull(exc.getCause()); + assertThat(exc.getCause().getMessage(), + containsString("Cannot use [sort] option in conjunction with [rescore].")); + + exc = expectThrows(Exception.class, + () -> client().prepareSearch() + .addSort(SortBuilders.fieldSort("number")) + .addSort(SortBuilders.scoreSort()) + .setTrackScores(true) + .addRescorer(new QueryRescorerBuilder(matchAllQuery()), 50) + .get() + ); + assertNotNull(exc.getCause()); + assertThat(exc.getCause().getMessage(), + containsString("Cannot use [sort] option in conjunction with [rescore].")); + + SearchResponse resp = client().prepareSearch().addSort(SortBuilders.scoreSort()) + .setTrackScores(true) + .addRescorer(new QueryRescorerBuilder(matchAllQuery()).setRescoreQueryWeight(100.0f), 50) + .get(); + assertThat(resp.getHits().totalHits, equalTo(5L)); + assertThat(resp.getHits().getHits().length, equalTo(5)); + for (SearchHit hit : resp.getHits().getHits()) { + assertThat(hit.getScore(), equalTo(101f)); + } + } } diff --git a/core/src/test/java/org/elasticsearch/search/query/QueryStringIT.java b/core/src/test/java/org/elasticsearch/search/query/QueryStringIT.java index 651d8a8fc8399..a44d6ff6d5e84 100644 --- a/core/src/test/java/org/elasticsearch/search/query/QueryStringIT.java +++ 
b/core/src/test/java/org/elasticsearch/search/query/QueryStringIT.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.query.Operator; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.QueryStringQueryBuilder; @@ -46,6 +47,7 @@ import java.util.List; import java.util.Set; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery; import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -351,6 +353,37 @@ public void testGraphQueries() throws Exception { assertSearchHits(searchResponse, "1", "2", "3"); } + public void testLimitOnExpandedFields() throws Exception { + XContentBuilder builder = jsonBuilder(); + builder.startObject(); + builder.startObject("type1"); + builder.startObject("properties"); + for (int i = 0; i < 1025; i++) { + builder.startObject("field" + i).field("type", "text").endObject(); + } + builder.endObject(); // properties + builder.endObject(); // type1 + builder.endObject(); + + assertAcked(prepareCreate("toomanyfields") + .setSettings(Settings.builder().put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), 1200)) + .addMapping("type1", builder)); + + client().prepareIndex("toomanyfields", "type1", "1").setSource("field171", "foo bar baz").get(); + refresh(); + + Exception e = expectThrows(Exception.class, () -> { + QueryStringQueryBuilder qb = queryStringQuery("bar"); + if (randomBoolean()) { + qb.useAllFields(true); + } + logger.info("--> using {}", qb); + client().prepareSearch("toomanyfields").setQuery(qb).get(); + }); + assertThat(ExceptionsHelper.detailedMessage(e), + 
containsString("field expansion matches too many fields, limit: 1024, got: 1025")); + } + private void assertHits(SearchHits hits, String... ids) { assertThat(hits.getTotalHits(), equalTo((long) ids.length)); Set hitIds = new HashSet<>(); diff --git a/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java b/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java index a4387c9611fe6..3c4a0f8823024 100644 --- a/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -1788,12 +1788,6 @@ public void testRangeQueryWithTimeZone() throws Exception { .get(); assertHitCount(searchResponse, 1L); assertThat(searchResponse.getHits().getAt(0).getId(), is("4")); - - // A Range Filter on a numeric field with a TimeZone should raise an exception - e = expectThrows(SearchPhaseExecutionException.class, () -> - client().prepareSearch("test") - .setQuery(QueryBuilders.rangeQuery("num").from("0").to("4").timeZone("-01:00")) - .get()); } public void testSearchEmptyDoc() { diff --git a/core/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java b/core/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java index b06bdf6cdc98f..398b30abbe14e 100644 --- a/core/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java +++ b/core/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java @@ -24,11 +24,15 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.query.BoolQueryBuilder; 
import org.elasticsearch.index.query.Operator; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.SimpleQueryStringBuilder; import org.elasticsearch.index.query.SimpleQueryStringFlag; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.SearchHit; @@ -540,6 +544,38 @@ public void testAllFieldsWithSpecifiedLeniency() throws IOException { containsString("NumberFormatException[For input string: \"foo123\"]")); } + + public void testLimitOnExpandedFields() throws Exception { + XContentBuilder builder = jsonBuilder(); + builder.startObject(); + builder.startObject("type1"); + builder.startObject("properties"); + for (int i = 0; i < 1025; i++) { + builder.startObject("field" + i).field("type", "text").endObject(); + } + builder.endObject(); // properties + builder.endObject(); // type1 + builder.endObject(); + + assertAcked(prepareCreate("toomanyfields") + .setSettings(Settings.builder().put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), 1200)) + .addMapping("type1", builder)); + + client().prepareIndex("toomanyfields", "type1", "1").setSource("field171", "foo bar baz").get(); + refresh(); + + Exception e = expectThrows(Exception.class, () -> { + SimpleQueryStringBuilder qb = simpleQueryStringQuery("bar"); + if (randomBoolean()) { + qb.useAllFields(true); + } + logger.info("--> using {}", qb); + client().prepareSearch("toomanyfields").setQuery(qb).get(); + }); + assertThat(ExceptionsHelper.detailedMessage(e), + containsString("field expansion matches too many fields, limit: 1024, got: 1025")); + } + private void assertHits(SearchHits hits, String... 
ids) { assertThat(hits.getTotalHits(), equalTo((long) ids.length)); Set hitIds = new HashSet<>(); diff --git a/core/src/test/java/org/elasticsearch/search/scroll/SearchScrollIT.java b/core/src/test/java/org/elasticsearch/search/scroll/SearchScrollIT.java index c887b20e11f63..c782bf14db85d 100644 --- a/core/src/test/java/org/elasticsearch/search/scroll/SearchScrollIT.java +++ b/core/src/test/java/org/elasticsearch/search/scroll/SearchScrollIT.java @@ -19,6 +19,8 @@ package org.elasticsearch.search.scroll; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.search.ClearScrollResponse; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; @@ -35,10 +37,12 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.query.QueryPhaseExecutionException; import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; +import org.junit.After; import java.io.IOException; import java.util.Map; @@ -54,6 +58,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; @@ -63,6 +68,13 @@ * Tests for scrolling. 
*/ public class SearchScrollIT extends ESIntegTestCase { + @After + public void cleanup() throws Exception { + assertAcked(client().admin().cluster().prepareUpdateSettings() + .setPersistentSettings(Settings.builder().putNull("*")) + .setTransientSettings(Settings.builder().putNull("*"))); + } + public void testSimpleScrollQueryThenFetch() throws Exception { client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 3)).execute().actionGet(); client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); @@ -518,6 +530,73 @@ public void testCloseAndReopenOrDeleteWithActiveScroll() throws IOException { } } + public void testScrollInvalidDefaultKeepAlive() throws IOException { + IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> + client().admin().cluster().prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put("search.max_keep_alive", "1m", "search.default_keep_alive", "2m")).get()); + assertThat(exc.getMessage(), containsString("was (2 minutes > 1 minute)")); + + assertAcked(client().admin().cluster().prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put("search.default_keep_alive", "5m", "search.max_keep_alive", "5m")).get()); + + assertAcked(client().admin().cluster().prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put("search.default_keep_alive", "2m")).get()); + + assertAcked(client().admin().cluster().prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put("search.max_keep_alive", "2m")).get()); + + + exc = expectThrows(IllegalArgumentException.class, () -> client().admin().cluster().prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put("search.default_keep_alive", "3m")).get()); + assertThat(exc.getMessage(), containsString("was (3 minutes > 2 minutes)")); + + assertAcked(client().admin().cluster().prepareUpdateSettings() + 
.setPersistentSettings(Settings.builder().put("search.default_keep_alive", "1m")).get()); + + exc = expectThrows(IllegalArgumentException.class, () -> client().admin().cluster().prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put("search.max_keep_alive", "30s")).get()); + assertThat(exc.getMessage(), containsString("was (1 minute > 30 seconds)")); + } + + public void testInvalidScrollKeepAlive() throws IOException { + createIndex("test"); + for (int i = 0; i < 2; i++) { + client().prepareIndex("test", "type1", + Integer.toString(i)).setSource(jsonBuilder().startObject().field("field", i).endObject()).execute().actionGet(); + } + refresh(); + assertAcked(client().admin().cluster().prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put("search.default_keep_alive", "5m", "search.max_keep_alive", "5m")).get()); + + Exception exc = expectThrows(Exception.class, + () -> client().prepareSearch() + .setQuery(matchAllQuery()) + .setSize(1) + .setScroll(TimeValue.timeValueHours(2)) + .execute().actionGet()); + QueryPhaseExecutionException queryPhaseExecutionException = + (QueryPhaseExecutionException) ExceptionsHelper.unwrap(exc, QueryPhaseExecutionException.class); + assertNotNull(queryPhaseExecutionException); + assertThat(queryPhaseExecutionException.getMessage(), containsString("Keep alive for scroll (2 hours) is too large")); + + SearchResponse searchResponse = client().prepareSearch() + .setQuery(matchAllQuery()) + .setSize(1) + .setScroll(TimeValue.timeValueMinutes(5)) + .execute().actionGet(); + assertNotNull(searchResponse.getScrollId()); + assertThat(searchResponse.getHits().getTotalHits(), equalTo(2L)); + assertThat(searchResponse.getHits().getHits().length, equalTo(1)); + + exc = expectThrows(Exception.class, + () -> client().prepareSearchScroll(searchResponse.getScrollId()) + .setScroll(TimeValue.timeValueHours(3)).get()); + queryPhaseExecutionException = + (QueryPhaseExecutionException) ExceptionsHelper.unwrap(exc, 
QueryPhaseExecutionException.class); + assertNotNull(queryPhaseExecutionException); + assertThat(queryPhaseExecutionException.getMessage(), containsString("Keep alive for scroll (3 hours) is too large")); + } + private void assertToXContentResponse(ClearScrollResponse response, boolean succeed, int numFreed) throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder(); response.toXContent(builder, ToXContent.EMPTY_PARAMS); diff --git a/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java b/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java index 0534123e0f04e..d05ddf4ee640e 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java +++ b/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java @@ -76,13 +76,14 @@ public abstract class AbstractSortTestCase> extends EST private static NamedXContentRegistry xContentRegistry; private static ScriptService scriptService; + protected static String MOCK_SCRIPT_NAME = "dummy"; @BeforeClass public static void init() { Settings baseSettings = Settings.builder() .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); - Map, Object>> scripts = Collections.singletonMap("dummy", p -> null); + Map, Object>> scripts = Collections.singletonMap(MOCK_SCRIPT_NAME, p -> null); ScriptEngine engine = new MockScriptEngine(MockScriptEngine.NAME, scripts); scriptService = new ScriptService(baseSettings, Collections.singletonMap(engine.getType(), engine), ScriptModule.CORE_CONTEXTS); @@ -134,9 +135,14 @@ public void testFromXContent() throws IOException { assertNotSame(testItem, parsedItem); assertEquals(testItem, parsedItem); assertEquals(testItem.hashCode(), parsedItem.hashCode()); + assertWarnings(testItem); } } + protected void assertWarnings(T testItem) { + // assert potential warnings based on the test sort configuration. 
Do nothing by default, subtests can overwrite + } + /** * test that build() outputs a {@link SortField} that is similar to the one * we would get when parsing the xContent the sort builder is rendering out diff --git a/core/src/test/java/org/elasticsearch/search/sort/FieldSortBuilderTests.java b/core/src/test/java/org/elasticsearch/search/sort/FieldSortBuilderTests.java index b368ed651ce0f..72c0236c8b71d 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/FieldSortBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/sort/FieldSortBuilderTests.java @@ -19,23 +19,32 @@ package org.elasticsearch.search.sort; +import org.apache.lucene.index.Term; import org.apache.lucene.search.SortField; import org.apache.lucene.search.SortedNumericSelector; import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.SortedSetSelector; import org.apache.lucene.search.SortedSetSortField; +import org.apache.lucene.search.TermQuery; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource; +import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.TypeFieldMapper; +import org.elasticsearch.index.query.MatchNoneQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.QueryShardException; +import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.MultiValueMode; import java.io.IOException; +import java.util.ArrayList; import 
java.util.Arrays; import java.util.List; @@ -79,11 +88,19 @@ public FieldSortBuilder randomFieldSortBuilder() { if (randomBoolean()) { builder.sortMode(randomFrom(SortMode.values())); } - if (randomBoolean()) { - builder.setNestedSort(createRandomNestedSort(3)); + if (randomBoolean()) { + builder.setNestedSort(createRandomNestedSort(3)); + } else { + // the following are alternative ways to setNestedSort for nested sorting + if (randomBoolean()) { + builder.setNestedFilter(randomNestedFilter()); + } + if (randomBoolean()) { + builder.setNestedPath(randomAlphaOfLengthBetween(1, 10)); + } + } } - return builder; } @@ -93,7 +110,16 @@ protected FieldSortBuilder mutate(FieldSortBuilder original) throws IOException int parameter = randomIntBetween(0, 4); switch (parameter) { case 0: - mutated.setNestedSort(randomValueOtherThan(original.getNestedSort(), () -> NestedSortBuilderTests.createRandomNestedSort(3))); + if (original.getNestedPath() == null && original.getNestedFilter() == null) { + mutated.setNestedSort( + randomValueOtherThan(original.getNestedSort(), () -> NestedSortBuilderTests.createRandomNestedSort(3))); + } else { + if (randomBoolean()) { + mutated.setNestedPath(randomValueOtherThan(original.getNestedPath(), () -> randomAlphaOfLengthBetween(1, 10))); + } else { + mutated.setNestedFilter(randomValueOtherThan(original.getNestedFilter(), () -> randomNestedFilter())); + } + } break; case 1: mutated.sortMode(randomValueOtherThan(original.sortMode(), () -> randomFrom(SortMode.values()))); @@ -242,20 +268,34 @@ public void testMultiValueMode() throws IOException { public void testBuildNested() throws IOException { QueryShardContext shardContextMock = createMockShardContext(); - FieldSortBuilder sortBuilder = new FieldSortBuilder("value").setNestedPath("path"); + FieldSortBuilder sortBuilder = new FieldSortBuilder("fieldName") + .setNestedSort(new NestedSortBuilder("path").setFilter(QueryBuilders.termQuery(MAPPED_STRING_FIELDNAME, "value"))); SortField sortField = 
sortBuilder.build(shardContextMock).field; assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class)); XFieldComparatorSource comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource(); - assertNotNull(comparatorSource.nested()); + Nested nested = comparatorSource.nested(); + assertNotNull(nested); + assertEquals(new TermQuery(new Term(MAPPED_STRING_FIELDNAME, "value")), nested.getInnerQuery()); + + sortBuilder = new FieldSortBuilder("fieldName").setNestedPath("path"); + sortField = sortBuilder.build(shardContextMock).field; + assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class)); + comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource(); + nested = comparatorSource.nested(); + assertNotNull(nested); + assertEquals(new TermQuery(new Term(TypeFieldMapper.NAME, "__path")), nested.getInnerQuery()); - sortBuilder = new FieldSortBuilder("value").setNestedPath("path").setNestedFilter(QueryBuilders.termQuery("field", 10.0)); + sortBuilder = new FieldSortBuilder("fieldName").setNestedPath("path") + .setNestedFilter(QueryBuilders.termQuery(MAPPED_STRING_FIELDNAME, "value")); sortField = sortBuilder.build(shardContextMock).field; assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class)); comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource(); - assertNotNull(comparatorSource.nested()); + nested = comparatorSource.nested(); + assertNotNull(nested); + assertEquals(new TermQuery(new Term(MAPPED_STRING_FIELDNAME, "value")), nested.getInnerQuery()); // if nested path is missing, we omit any filter and return a SortedNumericSortField - sortBuilder = new FieldSortBuilder("value").setNestedFilter(QueryBuilders.termQuery("field", 10.0)); + sortBuilder = new FieldSortBuilder("fieldName").setNestedFilter(QueryBuilders.termQuery(MAPPED_STRING_FIELDNAME, "value")); sortField = sortBuilder.build(shardContextMock).field; assertThat(sortField, 
instanceOf(SortedNumericSortField.class)); } @@ -315,6 +355,70 @@ public void testModeNonNumericField() throws IOException { assertEquals(expectedError, e.getMessage()); } + /** + * Test we can either set nested sort via path/filter or via nested sort builder, not both + */ + public void testNestedSortBothThrows() throws IOException { + FieldSortBuilder sortBuilder = new FieldSortBuilder(MAPPED_STRING_FIELDNAME); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, + () -> sortBuilder.setNestedPath("nestedPath").setNestedSort(new NestedSortBuilder("otherPath"))); + assertEquals("Setting both nested_path/nested_filter and nested not allowed", iae.getMessage()); + iae = expectThrows(IllegalArgumentException.class, + () -> sortBuilder.setNestedSort(new NestedSortBuilder("otherPath")).setNestedPath("nestedPath")); + assertEquals("Setting both nested_path/nested_filter and nested not allowed", iae.getMessage()); + iae = expectThrows(IllegalArgumentException.class, + () -> sortBuilder.setNestedSort(new NestedSortBuilder("otherPath")).setNestedFilter(QueryBuilders.matchAllQuery())); + assertEquals("Setting both nested_path/nested_filter and nested not allowed", iae.getMessage()); + } + + /** + * Test the the nested Filter gets rewritten + */ + public void testNestedRewrites() throws IOException { + FieldSortBuilder sortBuilder = new FieldSortBuilder(MAPPED_STRING_FIELDNAME); + RangeQueryBuilder rangeQuery = new RangeQueryBuilder("fieldName") { + @Override + public QueryBuilder doRewrite(QueryRewriteContext queryShardContext) throws IOException { + return new MatchNoneQueryBuilder(); + } + }; + sortBuilder.setNestedPath("path").setNestedFilter(rangeQuery); + FieldSortBuilder rewritten = (FieldSortBuilder) sortBuilder + .rewrite(createMockShardContext()); + assertNotSame(rangeQuery, rewritten.getNestedFilter()); + } + + /** + * Test the the nested sort gets rewritten + */ + public void testNestedSortRewrites() throws IOException { + FieldSortBuilder 
sortBuilder = new FieldSortBuilder(MAPPED_STRING_FIELDNAME); + RangeQueryBuilder rangeQuery = new RangeQueryBuilder("fieldName") { + @Override + public QueryBuilder doRewrite(QueryRewriteContext queryShardContext) throws IOException { + return new MatchNoneQueryBuilder(); + } + }; + sortBuilder.setNestedSort(new NestedSortBuilder("path").setFilter(rangeQuery)); + FieldSortBuilder rewritten = (FieldSortBuilder) sortBuilder + .rewrite(createMockShardContext()); + assertNotSame(rangeQuery, rewritten.getNestedSort().getFilter()); + } + + @Override + protected void assertWarnings(FieldSortBuilder testItem) { + List expectedWarnings = new ArrayList<>(); + if (testItem.getNestedFilter() != null) { + expectedWarnings.add("[nested_filter] has been deprecated in favour for the [nested] parameter"); + } + if (testItem.getNestedPath() != null) { + expectedWarnings.add("[nested_path] has been deprecated in favor of the [nested] parameter"); + } + if (expectedWarnings.isEmpty() == false) { + assertWarnings(expectedWarnings.toArray(new String[expectedWarnings.size()])); + } + } + @Override protected FieldSortBuilder fromXContent(XContentParser parser, String fieldName) throws IOException { return FieldSortBuilder.fromXContent(parser, fieldName); diff --git a/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java b/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java index dc2e4ec17ea0f..eba2904d7705a 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java @@ -21,25 +21,41 @@ import org.apache.lucene.document.LatLonDocValuesField; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.SortField; +import org.apache.lucene.search.TermQuery; +import org.elasticsearch.ElasticsearchParseException; import 
org.elasticsearch.common.geo.GeoDistance; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource; +import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.elasticsearch.index.mapper.GeoPointFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.TypeFieldMapper; import org.elasticsearch.index.query.GeoValidationMethod; import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.index.query.MatchNoneQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.MultiValueMode; import org.elasticsearch.test.geo.RandomGeoGenerator; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; +import java.util.List; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.instanceOf; public class GeoDistanceSortBuilderTests extends AbstractSortTestCase { @@ -87,15 +103,24 @@ public static GeoDistanceSortBuilder randomGeoDistanceSortBuilder() { result.sortMode(randomValueOtherThan(SortMode.SUM, () -> randomFrom(SortMode.values()))); } if (randomBoolean()) { - // don't fully randomize here, GeoDistanceSort is picky about the filters that are allowed - NestedSortBuilder nestedSort = new NestedSortBuilder(randomAlphaOfLengthBetween(3, 10)); - nestedSort.setFilter(new 
MatchAllQueryBuilder()); - result.setNestedSort(nestedSort); + result.validation(randomValueOtherThan(result.validation(), () -> randomFrom(GeoValidationMethod.values()))); } if (randomBoolean()) { - result.validation(randomValueOtherThan(result.validation(), () -> randomFrom(GeoValidationMethod.values()))); + if (randomBoolean()) { + // don't fully randomize here, GeoDistanceSort is picky about the filters that are allowed + NestedSortBuilder nestedSort = new NestedSortBuilder(randomAlphaOfLengthBetween(3, 10)); + nestedSort.setFilter(new MatchAllQueryBuilder()); + result.setNestedSort(nestedSort); + } else { + // the following are alternative ways to setNestedSort for nested sorting + if (randomBoolean()) { + result.setNestedFilter(new MatchAllQueryBuilder()); + } + if (randomBoolean()) { + result.setNestedPath(randomAlphaOfLengthBetween(1, 10)); + } + } } - return result; } @@ -155,7 +180,16 @@ protected GeoDistanceSortBuilder mutate(GeoDistanceSortBuilder original) throws () -> randomFrom(SortMode.values()))); break; case 6: - result.setNestedSort(randomValueOtherThan(original.getNestedSort(), () -> NestedSortBuilderTests.createRandomNestedSort(3))); + if (original.getNestedPath() == null && original.getNestedFilter() == null) { + result.setNestedSort( + randomValueOtherThan(original.getNestedSort(), () -> NestedSortBuilderTests.createRandomNestedSort(3))); + } else { + if (randomBoolean()) { + result.setNestedPath(randomValueOtherThan(original.getNestedPath(), () -> randomAlphaOfLengthBetween(1, 10))); + } else { + result.setNestedFilter(randomValueOtherThan(original.getNestedFilter(), () -> randomNestedFilter())); + } + } break; case 7: result.validation(randomValueOtherThan(result.validation(), () -> randomFrom(GeoValidationMethod.values()))); @@ -345,6 +379,21 @@ private GeoDistanceSortBuilder parse(XContentBuilder sortBuilder) throws Excepti return GeoDistanceSortBuilder.fromXContent(parser, null); } + @Override + protected void 
assertWarnings(GeoDistanceSortBuilder testItem) { + List expectedWarnings = new ArrayList<>(); + if (testItem.getNestedFilter() != null) { + expectedWarnings.add("[nested_filter] has been deprecated in favour of the [nested] parameter"); + } + if (testItem.getNestedPath() != null) { + expectedWarnings.add("[nested_path] has been deprecated in favour of the [nested] parameter"); + } + if (expectedWarnings.isEmpty() == false) { + assertWarnings(expectedWarnings.toArray(new String[expectedWarnings.size()])); + } + } + + @Override protected GeoDistanceSortBuilder fromXContent(XContentParser parser, String fieldName) throws IOException { return GeoDistanceSortBuilder.fromXContent(parser, fieldName); @@ -385,4 +434,191 @@ public void testCommonCaseIsOptimized() throws IOException { sort = builder.build(context); assertEquals(SortField.class, sort.field.getClass()); // can't use LatLon optimized sorting with DESC sorting } + + /** + * Test that the sort builder order gets transfered correctly to the SortField + */ + public void testBuildSortFieldOrder() throws IOException { + QueryShardContext shardContextMock = createMockShardContext(); + GeoDistanceSortBuilder geoDistanceSortBuilder = new GeoDistanceSortBuilder("fieldName", 1.0, 1.0); + assertEquals(false, geoDistanceSortBuilder.build(shardContextMock).field.getReverse()); + + geoDistanceSortBuilder.order(SortOrder.ASC); + assertEquals(false, geoDistanceSortBuilder.build(shardContextMock).field.getReverse()); + + geoDistanceSortBuilder.order(SortOrder.DESC); + assertEquals(true, geoDistanceSortBuilder.build(shardContextMock).field.getReverse()); + } + + /** + * Test that the sort builder mode gets transfered correctly to the SortField + */ + public void testMultiValueMode() throws IOException { + QueryShardContext shardContextMock = createMockShardContext(); + GeoDistanceSortBuilder geoDistanceSortBuilder = new GeoDistanceSortBuilder("fieldName", 1.0, 1.0); + geoDistanceSortBuilder.sortMode(SortMode.MAX); + SortField 
sortField = geoDistanceSortBuilder.build(shardContextMock).field; + assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class)); + XFieldComparatorSource comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource(); + assertEquals(MultiValueMode.MAX, comparatorSource.sortMode()); + + // also use MultiValueMode.Max if no Mode set but order is DESC + geoDistanceSortBuilder = new GeoDistanceSortBuilder("fieldName", 1.0, 1.0); + geoDistanceSortBuilder.order(SortOrder.DESC); + sortField = geoDistanceSortBuilder.build(shardContextMock).field; + assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class)); + comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource(); + assertEquals(MultiValueMode.MAX, comparatorSource.sortMode()); + + // use MultiValueMode.Min if no Mode and order is ASC + geoDistanceSortBuilder = new GeoDistanceSortBuilder("fieldName", 1.0, 1.0); + // need to use distance unit other than Meters to not get back a LatLonPointSortField + geoDistanceSortBuilder.order(SortOrder.ASC).unit(DistanceUnit.INCH); + sortField = geoDistanceSortBuilder.build(shardContextMock).field; + assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class)); + comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource(); + assertEquals(MultiValueMode.MIN, comparatorSource.sortMode()); + + geoDistanceSortBuilder = new GeoDistanceSortBuilder("fieldName", 1.0, 1.0); + // need to use distance unit other than Meters to not get back a LatLonPointSortField + geoDistanceSortBuilder.sortMode(SortMode.MIN).unit(DistanceUnit.INCH); + sortField = geoDistanceSortBuilder.build(shardContextMock).field; + assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class)); + comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource(); + assertEquals(MultiValueMode.MIN, comparatorSource.sortMode()); + + 
geoDistanceSortBuilder.sortMode(SortMode.AVG); + sortField = geoDistanceSortBuilder.build(shardContextMock).field; + assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class)); + comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource(); + assertEquals(MultiValueMode.AVG, comparatorSource.sortMode()); + + geoDistanceSortBuilder.sortMode(SortMode.MEDIAN); + sortField = geoDistanceSortBuilder.build(shardContextMock).field; + assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class)); + comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource(); + assertEquals(MultiValueMode.MEDIAN, comparatorSource.sortMode()); + } + + /** + * Test that the sort builder nested object gets created in the SortField + */ + public void testBuildNested() throws IOException { + QueryShardContext shardContextMock = createMockShardContext(); + + GeoDistanceSortBuilder sortBuilder = new GeoDistanceSortBuilder("fieldName", 1.0, 1.0) + .setNestedSort(new NestedSortBuilder("path").setFilter(QueryBuilders.matchAllQuery())); + SortField sortField = sortBuilder.build(shardContextMock).field; + assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class)); + XFieldComparatorSource comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource(); + Nested nested = comparatorSource.nested(); + assertNotNull(nested); + assertEquals(new MatchAllDocsQuery(), nested.getInnerQuery()); + + sortBuilder = new GeoDistanceSortBuilder("fieldName", 1.0, 1.0).setNestedPath("path"); + sortField = sortBuilder.build(shardContextMock).field; + assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class)); + comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource(); + nested = comparatorSource.nested(); + assertNotNull(nested); + assertEquals(new TermQuery(new Term(TypeFieldMapper.NAME, "__path")), nested.getInnerQuery()); + + sortBuilder = new 
GeoDistanceSortBuilder("fieldName", 1.0, 1.0).setNestedPath("path") + .setNestedFilter(QueryBuilders.matchAllQuery()); + sortField = sortBuilder.build(shardContextMock).field; + assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class)); + comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource(); + nested = comparatorSource.nested(); + assertNotNull(nested); + assertEquals(new MatchAllDocsQuery(), nested.getInnerQuery()); + + // if nested path is missing, we omit any filter and return a regular SortField + // (LatLonSortField) + sortBuilder = new GeoDistanceSortBuilder("fieldName", 1.0, 1.0).setNestedFilter(QueryBuilders.termQuery("fieldName", "value")); + sortField = sortBuilder.build(shardContextMock).field; + assertThat(sortField, instanceOf(SortField.class)); + } + + /** + * Test that if coercion is used, a point gets normalized but the original values in the builder are unchanged + */ + public void testBuildCoerce() throws IOException { + QueryShardContext shardContextMock = createMockShardContext(); + GeoDistanceSortBuilder sortBuilder = new GeoDistanceSortBuilder("fieldName", -180.0, -360.0); + sortBuilder.validation(GeoValidationMethod.COERCE); + assertEquals(-180.0, sortBuilder.points()[0].getLat(), 0.0); + assertEquals(-360.0, sortBuilder.points()[0].getLon(), 0.0); + SortField sortField = sortBuilder.build(shardContextMock).field; + assertEquals(LatLonDocValuesField.newDistanceSort("fieldName", 0.0, 180.0), sortField); + } + + /** + * Test that if validation is strict, invalid points throw an error + */ + public void testBuildInvalidPoints() throws IOException { + QueryShardContext shardContextMock = createMockShardContext(); + { + GeoDistanceSortBuilder sortBuilder = new GeoDistanceSortBuilder("fieldName", -180.0, 0.0); + sortBuilder.validation(GeoValidationMethod.STRICT); + ElasticsearchParseException ex = expectThrows(ElasticsearchParseException.class, () -> sortBuilder.build(shardContextMock)); + 
assertEquals("illegal latitude value [-180.0] for [GeoDistanceSort] for field [fieldName].", ex.getMessage()); + } + { + GeoDistanceSortBuilder sortBuilder = new GeoDistanceSortBuilder("fieldName", 0.0, -360.0); + sortBuilder.validation(GeoValidationMethod.STRICT); + ElasticsearchParseException ex = expectThrows(ElasticsearchParseException.class, () -> sortBuilder.build(shardContextMock)); + assertEquals("illegal longitude value [-360.0] for [GeoDistanceSort] for field [fieldName].", ex.getMessage()); + } + } + + /** + * Test we can either set nested sort via path/filter or via nested sort builder, not both + */ + public void testNestedSortBothThrows() throws IOException { + GeoDistanceSortBuilder sortBuilder = new GeoDistanceSortBuilder("fieldName", 0.0, 0.0); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, + () -> sortBuilder.setNestedPath("nestedPath").setNestedSort(new NestedSortBuilder("otherPath"))); + assertEquals("Setting both nested_path/nested_filter and nested not allowed", iae.getMessage()); + iae = expectThrows(IllegalArgumentException.class, + () -> sortBuilder.setNestedSort(new NestedSortBuilder("otherPath")).setNestedPath("nestedPath")); + assertEquals("Setting both nested_path/nested_filter and nested not allowed", iae.getMessage()); + iae = expectThrows(IllegalArgumentException.class, + () -> sortBuilder.setNestedSort(new NestedSortBuilder("otherPath")).setNestedFilter(QueryBuilders.matchAllQuery())); + assertEquals("Setting both nested_path/nested_filter and nested not allowed", iae.getMessage()); + } + + /** + * Test the the nested Filter gets rewritten + */ + public void testNestedRewrites() throws IOException { + GeoDistanceSortBuilder sortBuilder = new GeoDistanceSortBuilder("fieldName", 0.0, 0.0); + RangeQueryBuilder rangeQuery = new RangeQueryBuilder("fieldName") { + @Override + public QueryBuilder doRewrite(QueryRewriteContext queryShardContext) throws IOException { + return new MatchNoneQueryBuilder(); + } + 
}; + sortBuilder.setNestedPath("path").setNestedFilter(rangeQuery); + GeoDistanceSortBuilder rewritten = (GeoDistanceSortBuilder) sortBuilder + .rewrite(createMockShardContext()); + assertNotSame(rangeQuery, rewritten.getNestedFilter()); + } + + /** + * Test the the nested sort gets rewritten + */ + public void testNestedSortRewrites() throws IOException { + GeoDistanceSortBuilder sortBuilder = new GeoDistanceSortBuilder("fieldName", 0.0, 0.0); + RangeQueryBuilder rangeQuery = new RangeQueryBuilder("fieldName") { + @Override + public QueryBuilder doRewrite(QueryRewriteContext queryShardContext) throws IOException { + return new MatchNoneQueryBuilder(); + } + }; + sortBuilder.setNestedSort(new NestedSortBuilder("path").setFilter(rangeQuery)); + GeoDistanceSortBuilder rewritten = (GeoDistanceSortBuilder) sortBuilder + .rewrite(createMockShardContext()); + assertNotSame(rangeQuery, rewritten.getNestedSort().getFilter()); + } + } diff --git a/core/src/test/java/org/elasticsearch/search/sort/NestedSortBuilderTests.java b/core/src/test/java/org/elasticsearch/search/sort/NestedSortBuilderTests.java index 5e94cd1c7fd49..0908d83896f92 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/NestedSortBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/sort/NestedSortBuilderTests.java @@ -27,11 +27,17 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.query.ConstantScoreQueryBuilder; +import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.index.query.MatchNoneQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.EqualsHashCodeTestUtils; import org.junit.AfterClass; import 
org.junit.BeforeClass; +import org.mockito.Mockito; import java.io.IOException; @@ -136,4 +142,64 @@ public void testEqualsAndHashcode() { NestedSortBuilderTests::mutate); } } + + /** + * Test that filters and inner nested sorts get rewritten + */ + public void testRewrite() throws IOException { + QueryBuilder filterThatRewrites = new MatchNoneQueryBuilder() { + @Override + protected QueryBuilder doRewrite(org.elasticsearch.index.query.QueryRewriteContext queryShardContext) throws IOException { + return new MatchAllQueryBuilder(); + }; + }; + // test that filter gets rewritten + NestedSortBuilder original = new NestedSortBuilder("path").setFilter(filterThatRewrites); + QueryRewriteContext mockRewriteContext = Mockito.mock(QueryRewriteContext.class); + NestedSortBuilder rewritten = original.rewrite(mockRewriteContext); + assertNotSame(rewritten, original); + assertNotSame(rewritten.getFilter(), original.getFilter()); + + // test that inner nested sort gets rewritten + original = new NestedSortBuilder("path"); + original.setNestedSort(new NestedSortBuilder("otherPath").setFilter(filterThatRewrites)); + rewritten = original.rewrite(mockRewriteContext); + assertNotSame(rewritten, original); + assertNotSame(rewritten.getNestedSort(), original.getNestedSort()); + + // test that both filter and inner nested sort get rewritten + original = new NestedSortBuilder("path"); + original.setFilter(filterThatRewrites); + original.setNestedSort(new NestedSortBuilder("otherPath").setFilter(filterThatRewrites)); + rewritten = original.rewrite(mockRewriteContext); + assertNotSame(rewritten, original); + assertNotSame(rewritten.getFilter(), original.getFilter()); + assertNotSame(rewritten.getNestedSort(), original.getNestedSort()); + + // test that original stays unchanged if no element rewrites + original = new NestedSortBuilder("path"); + original.setFilter(new MatchNoneQueryBuilder()); + original.setNestedSort(new NestedSortBuilder("otherPath").setFilter(new 
MatchNoneQueryBuilder())); + rewritten = original.rewrite(mockRewriteContext); + assertSame(rewritten, original); + assertSame(rewritten.getFilter(), original.getFilter()); + assertSame(rewritten.getNestedSort(), original.getNestedSort()); + + // test that rewrite works recursively + original = new NestedSortBuilder("firstLevel"); + ConstantScoreQueryBuilder constantScoreQueryBuilder = new ConstantScoreQueryBuilder(filterThatRewrites); + original.setFilter(constantScoreQueryBuilder); + NestedSortBuilder nestedSortThatRewrites = new NestedSortBuilder("thirdLevel") + .setFilter(filterThatRewrites); + original.setNestedSort(new NestedSortBuilder("secondLevel").setNestedSort(nestedSortThatRewrites)); + rewritten = original.rewrite(mockRewriteContext); + assertNotSame(rewritten, original); + assertNotSame(rewritten.getFilter(), constantScoreQueryBuilder); + assertNotSame(((ConstantScoreQueryBuilder) rewritten.getFilter()).innerQuery(), constantScoreQueryBuilder.innerQuery()); + + assertEquals("secondLevel", rewritten.getNestedSort().getPath()); + assertNotSame(rewritten.getNestedSort(), original.getNestedSort()); + assertEquals("thirdLevel", rewritten.getNestedSort().getNestedSort().getPath()); + assertNotSame(rewritten.getNestedSort().getNestedSort(), nestedSortThatRewrites); + } } diff --git a/core/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java b/core/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java index a38fbe77cf6e1..00ba365d8be79 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java @@ -20,12 +20,27 @@ package org.elasticsearch.search.sort; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.SortField; +import org.apache.lucene.search.TermQuery; import org.elasticsearch.common.xcontent.XContentParser; import 
org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource; +import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; +import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource; +import org.elasticsearch.index.fielddata.fieldcomparator.DoubleValuesComparatorSource; +import org.elasticsearch.index.mapper.TypeFieldMapper; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.index.query.MatchNoneQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.MultiValueMode; import org.elasticsearch.search.sort.ScriptSortBuilder.ScriptSortType; import java.io.IOException; @@ -34,6 +49,7 @@ import java.util.Set; import static org.elasticsearch.search.sort.NestedSortBuilderTests.createRandomNestedSort; +import static org.hamcrest.Matchers.instanceOf; public class ScriptSortBuilderTests extends AbstractSortTestCase { @@ -44,7 +60,7 @@ protected ScriptSortBuilder createTestItem() { public static ScriptSortBuilder randomScriptSortBuilder() { ScriptSortType type = randomBoolean() ? 
ScriptSortType.NUMBER : ScriptSortType.STRING; - ScriptSortBuilder builder = new ScriptSortBuilder(mockScript("dummy"), + ScriptSortBuilder builder = new ScriptSortBuilder(mockScript(MOCK_SCRIPT_NAME), type); if (randomBoolean()) { builder.order(randomFrom(SortOrder.values())); @@ -237,12 +253,146 @@ public void testParseUnexpectedToken() throws IOException { * script sort of type {@link ScriptSortType} does not work with {@link SortMode#AVG}, {@link SortMode#MEDIAN} or {@link SortMode#SUM} */ public void testBadSortMode() throws IOException { - ScriptSortBuilder builder = new ScriptSortBuilder(mockScript("something"), ScriptSortType.STRING); + ScriptSortBuilder builder = new ScriptSortBuilder(mockScript(MOCK_SCRIPT_NAME), ScriptSortType.STRING); String sortMode = randomFrom(new String[] { "avg", "median", "sum" }); Exception e = expectThrows(IllegalArgumentException.class, () -> builder.sortMode(SortMode.fromString(sortMode))); assertEquals("script sort of type [string] doesn't support mode [" + sortMode + "]", e.getMessage()); } + /** + * Test that the sort builder mode gets transfered correctly to the SortField + */ + public void testMultiValueMode() throws IOException { + QueryShardContext shardContextMock = createMockShardContext(); + for (SortMode mode : SortMode.values()) { + ScriptSortBuilder sortBuilder = new ScriptSortBuilder(mockScript(MOCK_SCRIPT_NAME), ScriptSortType.NUMBER); + sortBuilder.sortMode(mode); + SortField sortField = sortBuilder.build(shardContextMock).field; + assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class)); + XFieldComparatorSource comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource(); + assertEquals(MultiValueMode.fromString(mode.toString()), comparatorSource.sortMode()); + } + + // check that without mode set, order ASC sets mode to MIN, DESC to MAX + ScriptSortBuilder sortBuilder = new ScriptSortBuilder(mockScript(MOCK_SCRIPT_NAME), ScriptSortType.NUMBER); + 
sortBuilder.order(SortOrder.ASC); + SortField sortField = sortBuilder.build(shardContextMock).field; + assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class)); + XFieldComparatorSource comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource(); + assertEquals(MultiValueMode.MIN, comparatorSource.sortMode()); + + sortBuilder = new ScriptSortBuilder(mockScript(MOCK_SCRIPT_NAME), ScriptSortType.NUMBER); + sortBuilder.order(SortOrder.DESC); + sortField = sortBuilder.build(shardContextMock).field; + assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class)); + comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource(); + assertEquals(MultiValueMode.MAX, comparatorSource.sortMode()); + } + + /** + * Test that the correct comparator sort is returned, based on the script type + */ + public void testBuildCorrectComparatorType() throws IOException { + ScriptSortBuilder sortBuilder = new ScriptSortBuilder(mockScript(MOCK_SCRIPT_NAME), ScriptSortType.STRING); + SortField sortField = sortBuilder.build(createMockShardContext()).field; + assertThat(sortField.getComparatorSource(), instanceOf(BytesRefFieldComparatorSource.class)); + + sortBuilder = new ScriptSortBuilder(mockScript(MOCK_SCRIPT_NAME), ScriptSortType.NUMBER); + sortField = sortBuilder.build(createMockShardContext()).field; + assertThat(sortField.getComparatorSource(), instanceOf(DoubleValuesComparatorSource.class)); + } + + /** + * Test that the sort builder nested object gets created in the SortField + */ + public void testBuildNested() throws IOException { + QueryShardContext shardContextMock = createMockShardContext(); + + ScriptSortBuilder sortBuilder = new ScriptSortBuilder(mockScript(MOCK_SCRIPT_NAME), ScriptSortType.NUMBER) + .setNestedSort(new NestedSortBuilder("path").setFilter(QueryBuilders.matchAllQuery())); + SortField sortField = sortBuilder.build(shardContextMock).field; + 
assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class)); + XFieldComparatorSource comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource(); + Nested nested = comparatorSource.nested(); + assertNotNull(nested); + assertEquals(new MatchAllDocsQuery(), nested.getInnerQuery()); + + sortBuilder = new ScriptSortBuilder(mockScript(MOCK_SCRIPT_NAME), ScriptSortType.NUMBER).setNestedPath("path"); + sortField = sortBuilder.build(shardContextMock).field; + assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class)); + comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource(); + nested = comparatorSource.nested(); + assertNotNull(nested); + assertEquals(new TermQuery(new Term(TypeFieldMapper.NAME, "__path")), nested.getInnerQuery()); + + sortBuilder = new ScriptSortBuilder(mockScript(MOCK_SCRIPT_NAME), ScriptSortType.NUMBER).setNestedPath("path") + .setNestedFilter(QueryBuilders.matchAllQuery()); + sortField = sortBuilder.build(shardContextMock).field; + assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class)); + comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource(); + nested = comparatorSource.nested(); + assertNotNull(nested); + assertEquals(new MatchAllDocsQuery(), nested.getInnerQuery()); + + // if nested path is missing, we omit nested element in the comparator + sortBuilder = new ScriptSortBuilder(mockScript(MOCK_SCRIPT_NAME), ScriptSortType.NUMBER) + .setNestedFilter(QueryBuilders.matchAllQuery()); + sortField = sortBuilder.build(shardContextMock).field; + assertThat(sortField.getComparatorSource(), instanceOf(XFieldComparatorSource.class)); + comparatorSource = (XFieldComparatorSource) sortField.getComparatorSource(); + assertNull(comparatorSource.nested()); + } + + /** + * Test we can either set nested sort via path/filter or via nested sort builder, not both + */ + public void testNestedSortBothThrows() throws IOException { + 
ScriptSortBuilder sortBuilder = new ScriptSortBuilder(mockScript(MOCK_SCRIPT_NAME), ScriptSortType.NUMBER); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, + () -> sortBuilder.setNestedPath("nestedPath").setNestedSort(new NestedSortBuilder("otherPath"))); + assertEquals("Setting both nested_path/nested_filter and nested not allowed", iae.getMessage()); + iae = expectThrows(IllegalArgumentException.class, + () -> sortBuilder.setNestedSort(new NestedSortBuilder("otherPath")).setNestedPath("nestedPath")); + assertEquals("Setting both nested_path/nested_filter and nested not allowed", iae.getMessage()); + iae = expectThrows(IllegalArgumentException.class, + () -> sortBuilder.setNestedSort(new NestedSortBuilder("otherPath")).setNestedFilter(QueryBuilders.matchAllQuery())); + assertEquals("Setting both nested_path/nested_filter and nested not allowed", iae.getMessage()); + } + + /** + * Test the the nested Filter gets rewritten + */ + public void testNestedRewrites() throws IOException { + ScriptSortBuilder sortBuilder = new ScriptSortBuilder(mockScript("something"), ScriptSortType.STRING); + RangeQueryBuilder rangeQuery = new RangeQueryBuilder("fieldName") { + @Override + public QueryBuilder doRewrite(QueryRewriteContext queryShardContext) throws IOException { + return new MatchNoneQueryBuilder(); + } + }; + sortBuilder.setNestedPath("path").setNestedFilter(rangeQuery); + ScriptSortBuilder rewritten = (ScriptSortBuilder) sortBuilder + .rewrite(createMockShardContext()); + assertNotSame(rangeQuery, rewritten.getNestedFilter()); + } + + /** + * Test the the nested sort gets rewritten + */ + public void testNestedSortRewrites() throws IOException { + ScriptSortBuilder sortBuilder = new ScriptSortBuilder(mockScript("something"), ScriptSortType.STRING); + RangeQueryBuilder rangeQuery = new RangeQueryBuilder("fieldName") { + @Override + public QueryBuilder doRewrite(QueryRewriteContext queryShardContext) throws IOException { + return new 
MatchNoneQueryBuilder(); + } + }; + sortBuilder.setNestedSort(new NestedSortBuilder("path").setFilter(rangeQuery)); + ScriptSortBuilder rewritten = (ScriptSortBuilder) sortBuilder + .rewrite(createMockShardContext()); + assertNotSame(rangeQuery, rewritten.getNestedSort().getFilter()); + } + @Override protected ScriptSortBuilder fromXContent(XContentParser parser, String fieldName) throws IOException { return ScriptSortBuilder.fromXContent(parser, fieldName); diff --git a/core/src/test/java/org/elasticsearch/search/sort/SortBuilderTests.java b/core/src/test/java/org/elasticsearch/search/sort/SortBuilderTests.java index 252c55d84137f..06f5ccf696ce4 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/SortBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/sort/SortBuilderTests.java @@ -34,8 +34,10 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.HashSet; import java.util.Iterator; import java.util.List; +import java.util.Set; import static java.util.Collections.emptyList; @@ -126,10 +128,11 @@ public void testSingleFieldSort() throws IOException { } /** - * test random syntax variations + * test parsing random syntax variations */ public void testRandomSortBuilders() throws IOException { for (int runs = 0; runs < NUMBER_OF_RUNS; runs++) { + SetexpectedWarningHeaders = new HashSet<>(); List> testBuilders = randomSortBuilderList(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder(); xContentBuilder.startObject(); @@ -139,6 +142,16 @@ public void testRandomSortBuilders() throws IOException { xContentBuilder.field("sort"); } for (SortBuilder builder : testBuilders) { + if (builder instanceof GeoDistanceSortBuilder) { + GeoDistanceSortBuilder gdsb = (GeoDistanceSortBuilder) builder; + if (gdsb.getNestedFilter() != null) { + expectedWarningHeaders.add("[nested_filter] has been deprecated in favour of the [nested] parameter"); + } + if (gdsb.getNestedPath() != null) { + 
expectedWarningHeaders.add("[nested_path] has been deprecated in favour of the [nested] parameter"); + } + } + if (builder instanceof ScoreSortBuilder || builder instanceof FieldSortBuilder) { switch (randomIntBetween(0, 2)) { case 0: @@ -176,6 +189,9 @@ public void testRandomSortBuilders() throws IOException { for (SortBuilder parsedBuilder : parsedSort) { assertEquals(iterator.next(), parsedBuilder); } + if (expectedWarningHeaders.size() > 0) { + assertWarnings(expectedWarningHeaders.toArray(new String[expectedWarningHeaders.size()])); + } } } diff --git a/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java b/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java index 5bd2bad31d134..3cbee6adc4161 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java @@ -858,6 +858,38 @@ public void testThatIndexingInvalidFieldsInCompletionFieldResultsInException() t } } + public void testSkipDuplicates() throws Exception { + final CompletionMappingBuilder mapping = new CompletionMappingBuilder(); + createIndexAndMapping(mapping); + int numDocs = randomIntBetween(10, 100); + int numUnique = randomIntBetween(1, numDocs); + List indexRequestBuilders = new ArrayList<>(); + for (int i = 1; i <= numDocs; i++) { + int id = i % numUnique; + indexRequestBuilders.add(client().prepareIndex(INDEX, TYPE, "" + i) + .setSource(jsonBuilder() + .startObject() + .startObject(FIELD) + .field("input", "suggestion" + id) + .field("weight", id) + .endObject() + .endObject() + )); + } + String[] expected = new String[numUnique]; + int sugg = numUnique - 1; + for (int i = 0; i < numUnique; i++) { + expected[i] = "suggestion" + sugg--; + } + indexRandom(true, indexRequestBuilders); + CompletionSuggestionBuilder completionSuggestionBuilder = + 
SuggestBuilders.completionSuggestion(FIELD).prefix("sugg").skipDuplicates(true).size(numUnique); + + SearchResponse searchResponse = client().prepareSearch(INDEX) + .suggest(new SuggestBuilder().addSuggestion("suggestions", completionSuggestionBuilder)).execute().actionGet(); + assertSuggestions(searchResponse, true, "suggestions", expected); + } + public void assertSuggestions(String suggestionName, SuggestionBuilder suggestBuilder, String... suggestions) { SearchResponse searchResponse = client().prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion(suggestionName, suggestBuilder)).execute().actionGet(); assertSuggestions(searchResponse, suggestionName, suggestions); @@ -1108,6 +1140,28 @@ public void testIndexingUnrelatedNullValue() throws Exception { } } + public void testMultiDocSuggestions() throws Exception { + final CompletionMappingBuilder mapping = new CompletionMappingBuilder(); + createIndexAndMapping(mapping); + int numDocs = 10; + List indexRequestBuilders = new ArrayList<>(); + for (int i = 1; i <= numDocs; i++) { + indexRequestBuilders.add(client().prepareIndex(INDEX, TYPE, "" + i) + .setSource(jsonBuilder() + .startObject() + .startObject(FIELD) + .array("input", "suggestion" + i, "suggestions" + i, "suggester" + i) + .field("weight", i) + .endObject() + .endObject() + )); + } + indexRandom(true, indexRequestBuilders); + CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg").shardSize(15); + assertSuggestions("foo", prefix, "suggester10", "suggester9", "suggester8", "suggester7", "suggester6"); + } + + public static boolean isReservedChar(char c) { switch (c) { case '\u001F': diff --git a/core/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java b/core/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java index a9741c3170008..13f7e55277cc4 100644 --- 
a/core/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java @@ -502,12 +502,12 @@ public void testGeoBoosting() throws Exception { CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg"); assertSuggestions("foo", prefix, "suggestion9", "suggestion8", "suggestion7", "suggestion6", "suggestion5"); - GeoQueryContext context1 = GeoQueryContext.builder().setGeoPoint(geoPoints[0]).setBoost(2).build(); + GeoQueryContext context1 = GeoQueryContext.builder().setGeoPoint(geoPoints[0]).setBoost(11).build(); GeoQueryContext context2 = GeoQueryContext.builder().setGeoPoint(geoPoints[1]).build(); CompletionSuggestionBuilder geoBoostingPrefix = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg") .contexts(Collections.singletonMap("geo", Arrays.asList(context1, context2))); - assertSuggestions("foo", geoBoostingPrefix, "suggestion8", "suggestion6", "suggestion4", "suggestion9", "suggestion7"); + assertSuggestions("foo", geoBoostingPrefix, "suggestion8", "suggestion6", "suggestion4", "suggestion2", "suggestion0"); } public void testGeoPointContext() throws Exception { @@ -639,6 +639,50 @@ public void testGeoField() throws Exception { assertEquals("Hotel Amsterdam in Berlin", searchResponse.getSuggest().getSuggestion(suggestionName).iterator().next().getOptions().iterator().next().getText().string()); } + public void testSkipDuplicatesWithContexts() throws Exception { + LinkedHashMap map = new LinkedHashMap<>(); + map.put("type", ContextBuilder.category("type").field("type").build()); + map.put("cat", ContextBuilder.category("cat").field("cat").build()); + final CompletionMappingBuilder mapping = new CompletionMappingBuilder().context(map); + createIndexAndMapping(mapping); + int numDocs = randomIntBetween(10, 100); + int numUnique = randomIntBetween(1, numDocs); + List indexRequestBuilders = new ArrayList<>(); 
+ for (int i = 0; i < numDocs; i++) { + int id = i % numUnique; + XContentBuilder source = jsonBuilder() + .startObject() + .startObject(FIELD) + .field("input", "suggestion" + id) + .field("weight", id) + .endObject() + .field("cat", "cat" + id % 2) + .field("type", "type" + id) + .endObject(); + indexRequestBuilders.add(client().prepareIndex(INDEX, TYPE, "" + i) + .setSource(source)); + } + String[] expected = new String[numUnique]; + for (int i = 0; i < numUnique; i++) { + expected[i] = "suggestion" + (numUnique-1-i); + } + indexRandom(true, indexRequestBuilders); + CompletionSuggestionBuilder completionSuggestionBuilder = + SuggestBuilders.completionSuggestion(FIELD).prefix("sugg").skipDuplicates(true).size(numUnique); + + assertSuggestions("suggestions", completionSuggestionBuilder, expected); + + Map> contextMap = new HashMap<>(); + contextMap.put("cat", Arrays.asList(CategoryQueryContext.builder().setCategory("cat0").build())); + completionSuggestionBuilder = + SuggestBuilders.completionSuggestion(FIELD).prefix("sugg").contexts(contextMap).skipDuplicates(true).size(numUnique); + + String[] expectedModulo = Arrays.stream(expected) + .filter((s) -> Integer.parseInt(s.substring("suggestion".length())) % 2 == 0) + .toArray(String[]::new); + assertSuggestions("suggestions", completionSuggestionBuilder, expectedModulo); + } + public void assertSuggestions(String suggestionName, SuggestionBuilder suggestBuilder, String... 
suggestions) { SearchResponse searchResponse = client().prepareSearch(INDEX).suggest( new SuggestBuilder().addSuggestion(suggestionName, suggestBuilder) diff --git a/core/src/test/java/org/elasticsearch/search/suggest/SuggestTests.java b/core/src/test/java/org/elasticsearch/search/suggest/SuggestTests.java index f1a630fab3742..d53cbfdab6e80 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/SuggestTests.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/SuggestTests.java @@ -139,7 +139,7 @@ public void testToXContent() throws IOException { public void testFilter() throws Exception { List>> suggestions; - CompletionSuggestion completionSuggestion = new CompletionSuggestion(randomAlphaOfLength(10), 2); + CompletionSuggestion completionSuggestion = new CompletionSuggestion(randomAlphaOfLength(10), 2, false); PhraseSuggestion phraseSuggestion = new PhraseSuggestion(randomAlphaOfLength(10), 2); TermSuggestion termSuggestion = new TermSuggestion(randomAlphaOfLength(10), 2, SortBy.SCORE); suggestions = Arrays.asList(completionSuggestion, phraseSuggestion, termSuggestion); @@ -160,7 +160,7 @@ public void testSuggestionOrdering() throws Exception { suggestions = new ArrayList<>(); int n = randomIntBetween(2, 5); for (int i = 0; i < n; i++) { - suggestions.add(new CompletionSuggestion(randomAlphaOfLength(10), randomIntBetween(3, 5))); + suggestions.add(new CompletionSuggestion(randomAlphaOfLength(10), randomIntBetween(3, 5), false)); } Collections.shuffle(suggestions, random()); Suggest suggest = new Suggest(suggestions); diff --git a/core/src/test/java/org/elasticsearch/search/suggest/SuggestionTests.java b/core/src/test/java/org/elasticsearch/search/suggest/SuggestionTests.java index 3c56597299dd3..70c4396ce8867 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/SuggestionTests.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/SuggestionTests.java @@ -79,7 +79,7 @@ public static Suggestion> createTestItem(Class 
suggestion = new PhraseSuggestion(name, size); entrySupplier = () -> SuggestionEntryTests.createTestItem(PhraseSuggestion.Entry.class); } else if (type == CompletionSuggestion.class) { - suggestion = new CompletionSuggestion(name, size); + suggestion = new CompletionSuggestion(name, size, randomBoolean()); entrySupplier = () -> SuggestionEntryTests.createTestItem(CompletionSuggestion.Entry.class); } else { throw new UnsupportedOperationException("type not supported [" + type + "]"); @@ -249,7 +249,7 @@ public void testToXContent() throws IOException { CompletionSuggestion.Entry.Option option = new CompletionSuggestion.Entry.Option(1, new Text("someText"), 1.3f, contexts); CompletionSuggestion.Entry entry = new CompletionSuggestion.Entry(new Text("entryText"), 42, 313); entry.addOption(option); - CompletionSuggestion suggestion = new CompletionSuggestion("suggestionName", 5); + CompletionSuggestion suggestion = new CompletionSuggestion("suggestionName", 5, randomBoolean()); suggestion.addTerm(entry); BytesReference xContent = toXContent(suggestion, XContentType.JSON, params, randomBoolean()); assertEquals( @@ -265,4 +265,4 @@ public void testToXContent() throws IOException { + "}]}", xContent.utf8ToString()); } } -} \ No newline at end of file +} diff --git a/core/src/test/java/org/elasticsearch/search/suggest/completion/CompletionSuggesterBuilderTests.java b/core/src/test/java/org/elasticsearch/search/suggest/completion/CompletionSuggesterBuilderTests.java index d8eb885823b8e..862916890e1bb 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/completion/CompletionSuggesterBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/completion/CompletionSuggesterBuilderTests.java @@ -114,6 +114,7 @@ public static CompletionSuggestionBuilder randomCompletionSuggestionBuilder() { contextMap.put(geoQueryContextName, contexts); } testBuilder.contexts(contextMap); + testBuilder.skipDuplicates(randomBoolean()); return testBuilder; } @@ -128,7 
+129,7 @@ protected String[] shuffleProtectedFields() { @Override protected void mutateSpecificParameters(CompletionSuggestionBuilder builder) throws IOException { - switch (randomIntBetween(0, 4)) { + switch (randomIntBetween(0, 5)) { case 0: int nCatContext = randomIntBetween(1, 5); List contexts = new ArrayList<>(nCatContext); @@ -154,6 +155,9 @@ protected void mutateSpecificParameters(CompletionSuggestionBuilder builder) thr case 4: builder.regex(randomAlphaOfLength(10), RegexOptionsTests.randomRegexOptions()); break; + case 5: + builder.skipDuplicates(!builder.skipDuplicates); + break; default: throw new IllegalStateException("should not through"); } @@ -182,5 +186,6 @@ protected void assertSuggestionContext(CompletionSuggestionBuilder builder, Sugg assertEquals(parsedContextBytes.get(contextName), queryContexts.get(contextName)); } assertEquals(builder.regexOptions, completionSuggestionCtx.getRegexOptions()); + assertEquals(builder.skipDuplicates, completionSuggestionCtx.isSkipDuplicates()); } } diff --git a/core/src/test/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionTests.java b/core/src/test/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionTests.java index 4b0e60a1d00db..2a5a89bdde332 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionTests.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -38,7 +39,7 @@ public void testToReduce() throws Exception { String name = randomAlphaOfLength(10); int size = randomIntBetween(3, 5); for (int i = 0; i < nShards; i++) { - CompletionSuggestion suggestion = new CompletionSuggestion(name, size); + CompletionSuggestion suggestion = new CompletionSuggestion(name, size, false); suggestion.addTerm(new 
CompletionSuggestion.Entry(new Text(""), 0, 0)); shardSuggestions.add(suggestion); } diff --git a/docs/java-api/docs/index_.asciidoc b/docs/java-api/docs/index_.asciidoc index 0b91622dd8174..b32955d9d4f50 100644 --- a/docs/java-api/docs/index_.asciidoc +++ b/docs/java-api/docs/index_.asciidoc @@ -140,7 +140,7 @@ String json = "{" + "}"; IndexResponse response = client.prepareIndex("twitter", "tweet") - .setSource(json) +       .setSource(json, XContentType.JSON) .get(); -------------------------------------------------- diff --git a/docs/java-rest/high-level/apis/index.asciidoc b/docs/java-rest/high-level/apis/index.asciidoc index 1f5301fdbd4a6..b4dcf7e9d8037 100644 --- a/docs/java-rest/high-level/apis/index.asciidoc +++ b/docs/java-rest/high-level/apis/index.asciidoc @@ -6,5 +6,3 @@ include::bulk.asciidoc[] include::search.asciidoc[] include::scroll.asciidoc[] include::main.asciidoc[] -include::queries.asciidoc[] -include::aggs.asciidoc[] diff --git a/docs/java-rest/high-level/apis/search.asciidoc b/docs/java-rest/high-level/apis/search.asciidoc index 0c0bee985cadc..de336fd60d8e4 100644 --- a/docs/java-rest/high-level/apis/search.asciidoc +++ b/docs/java-rest/high-level/apis/search.asciidoc @@ -82,6 +82,7 @@ After this, the `SearchSourceBuilder` only needs to be added to the include-tagged::{doc-tests}/SearchDocumentationIT.java[search-source-setter] -------------------------------------------------- +[[java-rest-high-document-search-request-building-queries]] ===== Building queries Search queries are created using `QueryBuilder` objects. A `QueryBuilder` exists @@ -125,7 +126,7 @@ to the `SearchSourceBuilder` as follows: include-tagged::{doc-tests}/SearchDocumentationIT.java[search-query-setter] -------------------------------------------------- -The <> page gives a list of all available search queries with +The <> page gives a list of all available search queries with their corresponding `QueryBuilder` objects and `QueryBuilders` helper methods. 
@@ -178,6 +179,7 @@ setters with a similar name (e.g. `#preTags(String ...)`). Highlighted text fragments can <> from the `SearchResponse`. +[[java-rest-high-document-search-request-building-aggs]] ===== Requesting Aggregations Aggregations can be added to the search by first creating the appropriate @@ -190,7 +192,7 @@ sub-aggregation on the average age of employees in the company: include-tagged::{doc-tests}/SearchDocumentationIT.java[search-request-aggregations] -------------------------------------------------- -The <> page gives a list of all available aggregations with +The <> page gives a list of all available aggregations with their corresponding `AggregationBuilder` objects and `AggregationBuilders` helper methods. We will later see how to <> in the `SearchResponse`. diff --git a/docs/java-rest/high-level/apis/aggs.asciidoc b/docs/java-rest/high-level/builders/aggs.asciidoc similarity index 99% rename from docs/java-rest/high-level/apis/aggs.asciidoc rename to docs/java-rest/high-level/builders/aggs.asciidoc index 101251bee19aa..3b15243f5b2c6 100644 --- a/docs/java-rest/high-level/apis/aggs.asciidoc +++ b/docs/java-rest/high-level/builders/aggs.asciidoc @@ -1,4 +1,4 @@ -[[java-rest-high-aggregations]] +[[java-rest-high-aggregation-builders]] === Building Aggregations This page lists all the available aggregations with their corresponding `AggregationBuilder` class name and helper method name in the diff --git a/docs/java-rest/high-level/apis/queries.asciidoc b/docs/java-rest/high-level/builders/queries.asciidoc similarity index 99% rename from docs/java-rest/high-level/apis/queries.asciidoc rename to docs/java-rest/high-level/builders/queries.asciidoc index b46823fcf7503..88204baa8745d 100644 --- a/docs/java-rest/high-level/apis/queries.asciidoc +++ b/docs/java-rest/high-level/builders/queries.asciidoc @@ -1,5 +1,5 @@ -[[java-rest-high-search-queries]] -=== Building Search Queries +[[java-rest-high-query-builders]] +=== Building Queries This page lists all 
the available search queries with their corresponding `QueryBuilder` class name and helper method name in the `QueryBuilders` utility class. diff --git a/docs/java-rest/high-level/usage.asciidoc b/docs/java-rest/high-level/getting-started.asciidoc similarity index 94% rename from docs/java-rest/high-level/usage.asciidoc rename to docs/java-rest/high-level/getting-started.asciidoc index fd56f362387c0..86fe473fb29e0 100644 --- a/docs/java-rest/high-level/usage.asciidoc +++ b/docs/java-rest/high-level/getting-started.asciidoc @@ -1,4 +1,4 @@ -[[java-rest-high-usage]] +[[java-rest-high-getting-started]] == Getting started This section describes how to get started with the high-level REST client from @@ -36,7 +36,7 @@ major version. The javadoc for the REST high level client can be found at {rest-high-level-client-javadoc}/index.html. -[[java-rest-high-usage-maven]] +[[java-rest-high-getting-started-maven]] === Maven Repository The high-level Java REST client is hosted on @@ -46,7 +46,7 @@ Central]. The minimum Java version required is `1.8`. The High Level REST Client is subject to the same release cycle as Elasticsearch. Replace the version with the desired client version. -[[java-rest-high-usage-maven-maven]] +[[java-rest-high-getting-started-maven-maven]] ==== Maven configuration Here is how you can configure the dependency using maven as a dependency manager. @@ -61,7 +61,7 @@ Add the following to your `pom.xml` file: -------------------------------------------------- -[[java-rest-high-usage-maven-gradle]] +[[java-rest-high-getting-started-maven-gradle]] ==== Gradle configuration Here is how you can configure the dependency using gradle as a dependency manager. 
@@ -74,7 +74,7 @@ dependencies { } -------------------------------------------------- -[[java-rest-high-usage-maven-lucene]] +[[java-rest-high-getting-started-maven-lucene]] ==== Lucene Snapshot repository The very first releases of any major version (like a beta), might have been built on top of a Lucene Snapshot version. @@ -105,7 +105,7 @@ maven { } -------------------------------------------------- -[[java-rest-high-usage-dependencies]] +[[java-rest-high-getting-started-dependencies]] === Dependencies The High Level Java REST Client depends on the following artifacts and their @@ -115,7 +115,7 @@ transitive dependencies: - org.elasticsearch:elasticsearch -[[java-rest-high-usage-initialization]] +[[java-rest-high-getting-started-initialization]] === Initialization A `RestHighLevelClient` instance needs a <> diff --git a/docs/java-rest/high-level/index.asciidoc b/docs/java-rest/high-level/index.asciidoc index ec7a1bdaf7d9a..bc4c2dd89bb08 100644 --- a/docs/java-rest/high-level/index.asciidoc +++ b/docs/java-rest/high-level/index.asciidoc @@ -24,14 +24,10 @@ the same response objects. 
:doc-tests: {docdir}/../../client/rest-high-level/src/test/java/org/elasticsearch/client/documentation -include::usage.asciidoc[] - -include::apis.asciidoc[] - -include::apis/index.asciidoc[] - +include::getting-started.asciidoc[] +include::supported-apis.asciidoc[] +include::java-builders.asciidoc[] include::migration.asciidoc[] - include::../license.asciidoc[] :doc-tests!: diff --git a/docs/java-rest/high-level/java-builders.asciidoc b/docs/java-rest/high-level/java-builders.asciidoc new file mode 100644 index 0000000000000..be5c19293cd30 --- /dev/null +++ b/docs/java-rest/high-level/java-builders.asciidoc @@ -0,0 +1,32 @@ +[[java-rest-high-java-builders]] +== Using Java Builders + +The Java High Level REST Client depends on the Elasticsearch core project which provides +different types of Java `Builders` objects, including: + +Query Builders:: + +The query builders are used to create the query to execute within a search request. There +is a query builder for every type of query supported by the Query DSL. Each query builder +implements the `QueryBuilder` interface and allows to set the specific options for a given +type of query. Once created, the `QueryBuilder` object can be set as the query parameter of +`SearchSourceBuilder`. The <> +page shows an example of how to build a full search request using `SearchSourceBuilder` and +`QueryBuilder` objects. The <> page +gives a list of all available search queries with their corresponding `QueryBuilder` objects +and `QueryBuilders` helper methods. + +Aggregation Builders:: + +Similarly to query builders, the aggregation builders are used to create the aggregations to +compute during a search request execution. There is an aggregation builder for every type of +aggregation (or pipeline aggregation) supported by Elasticsearch. All builders extend the +`AggregationBuilder` class (or `PipelineAggregationBuilder`class). 
Once created, `AggregationBuilder` +objects can be set as the aggregation parameter of `SearchSourceBuilder`. There is a example +of how `AggregationBuilder` objects are used with `SearchSourceBuilder` objects to define the aggregations +to compute with a search query in <> page. +The <> page gives a list of all available +aggregations with their corresponding `AggregationBuilder` objects and `AggregationBuilders` helper methods. + +include::builders/queries.asciidoc[] +include::builders/aggs.asciidoc[] diff --git a/docs/java-rest/high-level/migration.asciidoc b/docs/java-rest/high-level/migration.asciidoc index 5006607c47a27..7ce0b00bdb07b 100644 --- a/docs/java-rest/high-level/migration.asciidoc +++ b/docs/java-rest/high-level/migration.asciidoc @@ -40,9 +40,9 @@ Java application that uses the `TransportClient` depends on the `org.elasticsearch.client:transport` artifact. This dependency must be replaced by a new dependency on the high-level client. -The <> page shows +The <> page shows typical configurations for Maven and Gradle and presents the - <> brought by the + <> brought by the high-level client. 
=== Changing the client's initialization code diff --git a/docs/java-rest/high-level/apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc similarity index 79% rename from docs/java-rest/high-level/apis.asciidoc rename to docs/java-rest/high-level/supported-apis.asciidoc index b294974bacd75..51411ea9fcaf0 100644 --- a/docs/java-rest/high-level/apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -3,19 +3,21 @@ The Java High Level REST Client supports the following APIs: -.Single document APIs +Single document APIs:: * <> * <> * <> * <> -.Multi-document APIs +Multi document APIs:: * <> -.Search APIs +Search APIs:: * <> * <> * <> -.Miscellaneous APIs +Miscellaneous APIs:: * <> + +include::apis/index.asciidoc[] \ No newline at end of file diff --git a/docs/plugins/repository-azure.asciidoc b/docs/plugins/repository-azure.asciidoc index a7b4dbaa7fbdf..3fc4a5a0cad21 100644 --- a/docs/plugins/repository-azure.asciidoc +++ b/docs/plugins/repository-azure.asciidoc @@ -46,7 +46,7 @@ before retrying after a first timeout or failure. The maximum backoff period is [source,yaml] ---- -cloud.azure.storage.timeout: 10s +azure.client.default.timeout: 10s azure.client.default.max_retries: 7 azure.client.secondary.timeout: 30s ---- diff --git a/docs/reference/aggregations/bucket/nested-aggregation.asciidoc b/docs/reference/aggregations/bucket/nested-aggregation.asciidoc index d8dbd638f1205..5d2d904f0eb7f 100644 --- a/docs/reference/aggregations/bucket/nested-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/nested-aggregation.asciidoc @@ -3,7 +3,7 @@ A special single bucket aggregation that enables aggregating nested documents. -For example, lets say we have a index of products, and each product holds the list of resellers - each having its own +For example, lets say we have an index of products, and each product holds the list of resellers - each having its own price for the product. 
The mapping could look like: [source,js] diff --git a/docs/reference/aggregations/metrics/extendedstats-aggregation.asciidoc b/docs/reference/aggregations/metrics/extendedstats-aggregation.asciidoc index b71427ae9cb55..6eb2f18928a81 100644 --- a/docs/reference/aggregations/metrics/extendedstats-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/extendedstats-aggregation.asciidoc @@ -109,7 +109,7 @@ GET /exams/_search // CONSOLE // TEST[setup:exams] -This will interpret the `script` parameter as an `inline` script with the `painless` script language and no script parameters. To use a file script use the following syntax: +This will interpret the `script` parameter as an `inline` script with the `painless` script language and no script parameters. To use a stored script use the following syntax: [source,js] -------------------------------------------------- diff --git a/docs/reference/how-to/search-speed.asciidoc b/docs/reference/how-to/search-speed.asciidoc index c1802ed716a10..60168ab856d8c 100644 --- a/docs/reference/how-to/search-speed.asciidoc +++ b/docs/reference/how-to/search-speed.asciidoc @@ -368,12 +368,3 @@ So what is the right number of replicas? If you have a cluster that has be able to cope with `max_failures` node failures at once at most, then the right number of replicas for you is `max(max_failures, ceil(num_nodes / num_primaries) - 1)`. - -[float] -=== Turn on adaptive replica selection - -When multiple copies of data are present, elasticsearch can use a set of -criteria called <> to select -the best copy of the data based on response time, service time, and queue size -of the node containing each copy of the shard. This can improve query throughput -and reduce latency for search-heavy applications. 
diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index 51d2291c4d87d..5347fd875d798 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -121,6 +121,11 @@ specific index module: <> or <> for a more efficient alternative to raising this. +`index.max_inner_result_window`:: + + The maximum value of `from + size` for inner hits definition and top hits aggregations to this index. Defaults to + `100`. Inner hits and top hits aggregation take heap memory and time proportional to `from + size` and this limits that memory. + `index.max_rescore_window`:: The maximum value of `window_size` for `rescore` requests in searches of this index. diff --git a/docs/reference/mapping/types/percolator.asciidoc b/docs/reference/mapping/types/percolator.asciidoc index 47eb3efc89d45..cdf1c876d5156 100644 --- a/docs/reference/mapping/types/percolator.asciidoc +++ b/docs/reference/mapping/types/percolator.asciidoc @@ -277,6 +277,9 @@ now returns matches from the new index: "body": "quick brown fox" } } + }, + "fields" : { + "_percolator_document_slot" : [0] } } ] @@ -472,6 +475,9 @@ This results in a response like this: } } } + }, + "fields" : { + "_percolator_document_slot" : [0] } } ] @@ -495,9 +501,9 @@ Otherwise percolate queries can be parsed incorrectly. In certain cases it is unknown what kind of percolator queries do get registered, and if no field mapping exists for fields that are referred by percolator queries then adding a percolator query fails. This means the mapping needs to be updated to have the field with the appropriate settings, and then the percolator query can be added. But sometimes it is sufficient -if all unmapped fields are handled as if these were default string fields. 
In those cases one can configure the -`index.percolator.map_unmapped_fields_as_string` setting to `true` (default to `false`) and then if a field referred in -a percolator query does not exist, it will be handled as a default string field so that adding the percolator query doesn't +if all unmapped fields are handled as if these were default text fields. In those cases one can configure the +`index.percolator.map_unmapped_fields_as_text` setting to `true` (default to `false`) and then if a field referred in +a percolator query does not exist, it will be handled as a default text field so that adding the percolator query doesn't fail. [float] diff --git a/docs/reference/migration/migrate_7_0.asciidoc b/docs/reference/migration/migrate_7_0.asciidoc index ffc3496046ddc..043d62465be39 100644 --- a/docs/reference/migration/migrate_7_0.asciidoc +++ b/docs/reference/migration/migrate_7_0.asciidoc @@ -28,8 +28,12 @@ way to reindex old indices is to use the `reindex` API. * <> * <> * <> +* <> +* <> include::migrate_7_0/aggregations.asciidoc[] include::migrate_7_0/cluster.asciidoc[] include::migrate_7_0/indices.asciidoc[] include::migrate_7_0/mappings.asciidoc[] +include::migrate_7_0/search.asciidoc[] +include::migrate_7_0/plugins.asciidoc[] diff --git a/docs/reference/migration/migrate_7_0/plugins.asciidoc b/docs/reference/migration/migrate_7_0/plugins.asciidoc new file mode 100644 index 0000000000000..6bc9edec0dabc --- /dev/null +++ b/docs/reference/migration/migrate_7_0/plugins.asciidoc @@ -0,0 +1,14 @@ +[[breaking_70_plugins_changes]] +=== Plugins changes + +==== Azure Repository plugin + +* The legacy azure settings which where starting with `cloud.azure.storage.` prefix have been removed. +This includes `account`, `key`, `default` and `timeout`. +You need to use settings which are starting with `azure.client.` prefix instead. + +* Global timeout setting `cloud.azure.storage.timeout` has been removed. +You must set it per azure client instead. 
Like `azure.client.default.timeout: 10s` for example. + +See {plugins}/repository-azure-usage.html#repository-azure-repository-settings[Azure Repository settings]. + diff --git a/docs/reference/migration/migrate_7_0/search.asciidoc b/docs/reference/migration/migrate_7_0/search.asciidoc new file mode 100644 index 0000000000000..1c63a65c1072a --- /dev/null +++ b/docs/reference/migration/migrate_7_0/search.asciidoc @@ -0,0 +1,20 @@ +[[breaking_70_search_changes]] +=== Search changes + +==== Adaptive replica selection enabled by default + +Adaptive replica selection has been enabled by default. If you wish to return to +the older round robin of search requests, you can use the +`cluster.routing.use_adaptive_replica_selection` setting: + +[source,js] +-------------------------------------------------- +PUT /_cluster/settings +{ + "transient": { + "cluster.routing.use_adaptive_replica_selection": false + } +} +-------------------------------------------------- +// CONSOLE + diff --git a/docs/reference/modules/gateway.asciidoc b/docs/reference/modules/gateway.asciidoc index cad05d1baaa0c..0af0d31fba2c5 100644 --- a/docs/reference/modules/gateway.asciidoc +++ b/docs/reference/modules/gateway.asciidoc @@ -4,9 +4,9 @@ The local gateway module stores the cluster state and shard data across full cluster restarts. 
- The following _static_ settings, which must be set on every master node, - control how long a freshly elected master should wait before it tries to - recover the cluster state and the cluster's data: +The following _static_ settings, which must be set on every master node, +control how long a freshly elected master should wait before it tries to +recover the cluster state and the cluster's data: `gateway.expected_nodes`:: diff --git a/docs/reference/modules/plugins.asciidoc b/docs/reference/modules/plugins.asciidoc index 240f984091345..ad708e88024cd 100644 --- a/docs/reference/modules/plugins.asciidoc +++ b/docs/reference/modules/plugins.asciidoc @@ -6,7 +6,7 @@ Plugins are a way to enhance the basic elasticsearch functionality in a custom manner. They range from adding custom mapping types, custom -analyzers (in a more built in fashion), native scripts, custom discovery +analyzers (in a more built in fashion), custom script engines, custom discovery and more. See the {plugins}/index.html[Plugins documentation] for more. diff --git a/docs/reference/modules/scripting/security.asciidoc b/docs/reference/modules/scripting/security.asciidoc index 7e7d5ccf6b209..37168f56b8f14 100644 --- a/docs/reference/modules/scripting/security.asciidoc +++ b/docs/reference/modules/scripting/security.asciidoc @@ -47,21 +47,6 @@ Bad: * Users can write arbitrary scripts, queries, `_search` requests. * User actions make documents with structure defined by users. -[float] -[[modules-scripting-security-do-no-weaken]] -=== Do not weaken script security settings -By default Elasticsearch will run inline, stored, and filesystem scripts for -the builtin languages, namely the scripting language Painless, the template -language Mustache, and the expression language Expressions. These *ought* to be -safe to expose to trusted users and to your application servers because they -have strong security sandboxes. 
The Elasticsearch committers do not support any -non-sandboxed scripting languages and using any would be a poor choice because: -1. This drops a layer of security, leaving only Elasticsearch's builtin -<>. -2. Non-sandboxed scripts have unchecked access to Elasticsearch's internals and -can cause all kinds of trouble if misused. - - [float] [[modules-scripting-other-layers]] === Other security layers diff --git a/docs/reference/modules/scripting/using.asciidoc b/docs/reference/modules/scripting/using.asciidoc index 37f75f6557a31..646bd4dd0921c 100644 --- a/docs/reference/modules/scripting/using.asciidoc +++ b/docs/reference/modules/scripting/using.asciidoc @@ -178,14 +178,12 @@ DELETE _scripts/calculate-score === Script Caching All scripts are cached by default so that they only need to be recompiled -when updates occur. File scripts keep a static cache and will always reside -in memory. Both inline and stored scripts are stored in a cache that can evict -residing scripts. By default, scripts do not have a time-based expiration, but +when updates occur. By default, scripts do not have a time-based expiration, but you can change this behavior by using the `script.cache.expire` setting. You can configure the size of this cache by using the `script.cache.max_size` setting. By default, the cache size is `100`. NOTE: The size of stored scripts is limited to 65,535 bytes. This can be changed by setting `script.max_size_in_bytes` setting to increase that soft -limit, but if scripts are really large then alternatives like -<> scripts should be considered instead. +limit, but if scripts are really large then a +<> should be considered. 
diff --git a/docs/reference/query-dsl/percolate-query.asciidoc b/docs/reference/query-dsl/percolate-query.asciidoc index c1f539cbd3ab3..9668af7245a76 100644 --- a/docs/reference/query-dsl/percolate-query.asciidoc +++ b/docs/reference/query-dsl/percolate-query.asciidoc @@ -103,6 +103,9 @@ The above request will yield the following response: "message": "bonsai tree" } } + }, + "fields" : { + "_percolator_document_slot" : [0] <2> } } ] @@ -112,6 +115,8 @@ The above request will yield the following response: // TESTRESPONSE[s/"took": 13,/"took": "$body.took",/] <1> The query with id `1` matches our document. +<2> The `_percolator_document_slot` field indicates which document has matched with this query. + Useful when percolating multiple document simultaneously. [float] ==== Parameters @@ -120,7 +125,10 @@ The following parameters are required when percolating a document: [horizontal] `field`:: The field of type `percolator` that holds the indexed queries. This is a required parameter. +`name`:: The suffix to be used for the `_percolator_document_slot` field in case multiple `percolate` queries have been specified. + This is an optional parameter. `document`:: The source of the document being percolated. +`documents`:: Like the `document` parameter, but accepts multiple documents via a json array. `document_type`:: The type / mapping of the document being percolated. This setting is deprecated and only required for indices created before 6.0 Instead of specifying the source of the document being percolated, the source can also be retrieved from an already @@ -136,6 +144,87 @@ In that case the `document` parameter can be substituted with the following para `preference`:: Optionally, preference to be used to fetch document to percolate. `version`:: Optionally, the expected version of the document to be fetched. +[float] +==== Percolating multiple documents + +The `percolate` query can match multiple documents simultaneously with the indexed percolator queries. 
+Percolating multiple documents in a single request can improve performance as queries only need to be parsed and +matched once instead of multiple times. + +The `_percolator_document_slot` field that is being returned with each matched percolator query is important when percolating +multiple documents simultaneously. It indicates which documents matched with a particular percolator query. The numbers +correlate with the slot in the `documents` array specified in the `percolate` query. + +[source,js] +-------------------------------------------------- +GET /my-index/_search +{ + "query" : { + "percolate" : { + "field" : "query", + "documents" : [ <1> + { + "message" : "bonsai tree" + }, + { + "message" : "new tree" + }, + { + "message" : "the office" + }, + { + "message" : "office tree" + } + ] + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +<1> The documents array contains 4 documents that are going to be percolated at the same time. + +[source,js] +-------------------------------------------------- +{ + "took": 13, + "timed_out": false, + "_shards": { + "total": 5, + "successful": 5, + "skipped" : 0, + "failed": 0 + }, + "hits": { + "total": 1, + "max_score": 1.5606477, + "hits": [ + { + "_index": "my-index", + "_type": "doc", + "_id": "1", + "_score": 1.5606477, + "_source": { + "query": { + "match": { + "message": "bonsai tree" + } + } + }, + "fields" : { + "_percolator_document_slot" : [0, 1, 3] <1> + } + } + ] + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"took": 13,/"took": "$body.took",/] + +<1> The `_percolator_document_slot` indicates that the first, second and last documents specified in the `percolate` query + are matching with this query. + [float] ==== Percolating an Existing Document @@ -307,6 +396,9 @@ This will yield the following response. 
"message": [ "The quick brown fox jumps over the lazy dog" <1> ] + }, + "fields" : { + "_percolator_document_slot" : [0] } }, { @@ -325,6 +417,9 @@ This will yield the following response. "message": [ "The quick brown fox jumps over the lazy dog" <1> ] + }, + "fields" : { + "_percolator_document_slot" : [0] } } ] @@ -338,6 +433,179 @@ This will yield the following response. Instead of the query in the search request highlighting the percolator hits, the percolator queries are highlighting the document defined in the `percolate` query. +When percolating multiple documents at the same time like the request below then the highlight response is different: + +[source,js] +-------------------------------------------------- +GET /my-index/_search +{ + "query" : { + "percolate" : { + "field": "query", + "documents" : [ + { + "message" : "bonsai tree" + }, + { + "message" : "new tree" + }, + { + "message" : "the office" + }, + { + "message" : "office tree" + } + ] + } + }, + "highlight": { + "fields": { + "message": {} + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +The slightly different response: + +[source,js] +-------------------------------------------------- +{ + "took": 13, + "timed_out": false, + "_shards": { + "total": 5, + "successful": 5, + "skipped" : 0, + "failed": 0 + }, + "hits": { + "total": 1, + "max_score": 1.5606477, + "hits": [ + { + "_index": "my-index", + "_type": "doc", + "_id": "1", + "_score": 1.5606477, + "_source": { + "query": { + "match": { + "message": "bonsai tree" + } + } + }, + "fields" : { + "_percolator_document_slot" : [0, 1, 3] + }, + "highlight" : { <1> + "0_message" : [ + "bonsai tree" + ], + "3_message" : [ + "office tree" + ], + "1_message" : [ + "new tree" + ] + } + } + ] + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"took": 13,/"took": "$body.took",/] + +<1> The highlight fields have been prefixed with the document slot they belong to, + in order to 
know which highlight field belongs to what document. + +[float] +==== Specifying multiple percolate queries + +It is possible to specify multiple `percolate` queries in a single search request: + +[source,js] +-------------------------------------------------- +GET /my-index/_search +{ + "query" : { + "bool" : { + "should" : [ + { + "percolate" : { + "field" : "query", + "document" : { + "message" : "bonsai tree" + }, + "name": "query1" <1> + } + }, + { + "percolate" : { + "field" : "query", + "document" : { + "message" : "tulip flower" + }, + "name": "query2" <1> + } + } + ] + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +<1> The `name` parameter will be used to identify which percolator document slots belong to what `percolate` query. + +The `_percolator_document_slot` field name will be suffixed with what is specified in the `_name` parameter. +If that isn't specified then the `field` parameter will be used, which in this case will result in ambiguity. + +The above search request returns a response similar to this: + +[source,js] +-------------------------------------------------- +{ + "took": 13, + "timed_out": false, + "_shards": { + "total": 5, + "successful": 5, + "skipped" : 0, + "failed": 0 + }, + "hits": { + "total": 1, + "max_score": 0.5753642, + "hits": [ + { + "_index": "my-index", + "_type": "doc", + "_id": "1", + "_score": 0.5753642, + "_source": { + "query": { + "match": { + "message": "bonsai tree" + } + } + }, + "fields" : { + "_percolator_document_slot_query1" : [0] <1> + } + } + ] + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"took": 13,/"took": "$body.took",/] + +<1> The `_percolator_document_slot_query1` percolator slot field indicates that these matched slots are from the `percolate` + query with `_name` parameter set to `query1`. 
+ [float] ==== How it Works Under the Hood diff --git a/docs/reference/query-dsl/query-string-query.asciidoc b/docs/reference/query-dsl/query-string-query.asciidoc index a20ac345c2029..f42a3b09f9b6e 100644 --- a/docs/reference/query-dsl/query-string-query.asciidoc +++ b/docs/reference/query-dsl/query-string-query.asciidoc @@ -48,12 +48,12 @@ The `query_string` top level parameters include: |Parameter |Description |`query` |The actual query to be parsed. See <>. -|`default_field` |The default field for query terms if no prefix field -is specified. Defaults to the `index.query.default_field` index -settings, which in turn defaults to `*`. -`*` extracts all fields in the mapping that are eligible to term queries -and filters the metadata fields. All extracted fields are then combined -to build a query when no prefix field is provided. +|`default_field` |The default field for query terms if no prefix field is +specified. Defaults to the `index.query.default_field` index settings, which in +turn defaults to `*`. `*` extracts all fields in the mapping that are eligible +to term queries and filters the metadata fields. All extracted fields are then +combined to build a query when no prefix field is provided. There is a limit of +no more than 1024 fields being queried at once. |`default_operator` |The default operator used if no explicit operator is specified. For example, with a default operator of `OR`, the query @@ -63,6 +63,11 @@ with default operator of `AND`, the same query is translated to |`analyzer` |The analyzer name used to analyze the query string. +|`quote_analyzer` |The name of the analyzer that is used to analyze +quoted phrases in the query string. For those parts, it overrides other +analyzers that are set using the `analyzer` parameter or the +<> setting. + |`allow_leading_wildcard` |When set, `*` or `?` are allowed as the first character. Defaults to `true`. @@ -73,7 +78,7 @@ increments in result queries. Defaults to `true`. expand to. 
Defaults to `50` |`fuzziness` |Set the fuzziness for fuzzy queries. Defaults -to `AUTO`. See <> for allowed settings. +to `AUTO`. See <> for allowed settings. |`fuzzy_prefix_length` |Set the prefix length for fuzzy queries. Default is `0`. diff --git a/docs/reference/query-dsl/simple-query-string-query.asciidoc b/docs/reference/query-dsl/simple-query-string-query.asciidoc index 68a47193a1435..1f887cf631155 100644 --- a/docs/reference/query-dsl/simple-query-string-query.asciidoc +++ b/docs/reference/query-dsl/simple-query-string-query.asciidoc @@ -29,9 +29,10 @@ The `simple_query_string` top level parameters include: |`query` |The actual query to be parsed. See below for syntax. |`fields` |The fields to perform the parsed query against. Defaults to the -`index.query.default_field` index settings, which in turn defaults to `*`. -`*` extracts all fields in the mapping that are eligible to term queries -and filters the metadata fields. +`index.query.default_field` index settings, which in turn defaults to `*`. `*` +extracts all fields in the mapping that are eligible to term queries and filters +the metadata fields. There is a limit of no more than 1024 fields being queried +at once. |`default_operator` |The default operator used if no explicit operator is specified. For example, with a default operator of `OR`, the query diff --git a/docs/reference/search.asciidoc b/docs/reference/search.asciidoc index fc40051955dc1..d69175fec3f7c 100644 --- a/docs/reference/search.asciidoc +++ b/docs/reference/search.asciidoc @@ -11,10 +11,11 @@ exception of the <> endpoints. [[search-routing]] == Routing -When executing a search, it will be broadcast to all the index/indices -shards (round robin between replicas). Which shards will be searched on -can be controlled by providing the `routing` parameter. For example, -when indexing tweets, the routing value can be the user name: +When executing a search, Elasticsearch will pick the "best" copy of the data +based on the <> formula. 
+Which shards will be searched on can also be controlled by providing the +`routing` parameter. For example, when indexing tweets, the routing value can be +the user name: [source,js] -------------------------------------------------- @@ -60,30 +61,33 @@ the routing values match to. [[search-adaptive-replica]] == Adaptive Replica Selection -As an alternative to requests being sent to copies of the data in a round robin -fashion, you may enable adaptive replica selection. This allows the coordinating -node to send the request to the copy deemed "best" based on a number of -criteria: +By default, Elasticsearch will use what is called adaptive replica selection. +This allows the coordinating node to send the request to the copy deemed "best" +based on a number of criteria: - Response time of past requests between the coordinating node and the node containing the copy of the data - Time past search requests took to execute on the node containing the data - The queue size of the search threadpool on the node containing the data -This can be turned on by changing the dynamic cluster setting -`cluster.routing.use_adaptive_replica_selection` from `false` to `true`: +This can be turned off by changing the dynamic cluster setting +`cluster.routing.use_adaptive_replica_selection` from `true` to `false`: [source,js] -------------------------------------------------- PUT /_cluster/settings { "transient": { - "cluster.routing.use_adaptive_replica_selection": true + "cluster.routing.use_adaptive_replica_selection": false } } -------------------------------------------------- // CONSOLE +If adaptive replica selection is turned off, searches are sent to the +index/indices shards in a round robin fashion between all copies of the data +(primaries and replicas). 
+ [float] [[stats-groups]] == Stats Groups diff --git a/docs/reference/search/suggesters/completion-suggest.asciidoc b/docs/reference/search/suggesters/completion-suggest.asciidoc index 1a42dedf47c74..566a659279f60 100644 --- a/docs/reference/search/suggesters/completion-suggest.asciidoc +++ b/docs/reference/search/suggesters/completion-suggest.asciidoc @@ -277,6 +277,7 @@ The basic completion suggester query supports the following parameters: `field`:: The name of the field on which to run the query (required). `size`:: The number of suggestions to return (defaults to `5`). +`skip_duplicates`:: Whether duplicate suggestions should be filtered out (defaults to `false`). NOTE: The completion suggester considers all documents in the index. See <> for an explanation of how to query a subset of @@ -291,6 +292,33 @@ index completions into a single shard index. In case of high heap usage due to shard size, it is still recommended to break index into multiple shards instead of optimizing for completion performance. +[[skip_duplicates]] +==== Skip duplicate suggestions + +Queries can return duplicate suggestions coming from different documents. +It is possible to modify this behavior by setting `skip_duplicates` to true. +When set, this option filters out documents with duplicate suggestions from the result. + +[source,js] +-------------------------------------------------- +POST music/_search?pretty +{ + "suggest": { + "song-suggest" : { + "prefix" : "nor", + "completion" : { + "field" : "suggest", + "skip_duplicates": true + } + } + } +} +-------------------------------------------------- +// CONSOLE + +WARNING: when set to true this option can slow down search because more suggestions +need to be visited to find the top N. 
+ [[fuzzy]] ==== Fuzzy queries diff --git a/docs/reference/setup/bootstrap-checks.asciidoc b/docs/reference/setup/bootstrap-checks.asciidoc index 93a6fafde6815..3fd5b6053fa2f 100644 --- a/docs/reference/setup/bootstrap-checks.asciidoc +++ b/docs/reference/setup/bootstrap-checks.asciidoc @@ -23,39 +23,43 @@ documented individually. [float] === Development vs. production mode -By default, Elasticsearch binds to `localhost` for <> -and <> communication. This is -fine for downloading and playing with Elasticsearch, and everyday -development but it's useless for production systems. To form a cluster, -Elasticsearch instances must be reachable via transport communication so -they must bind transport to an external interface. Thus, we consider an -Elasticsearch instance to be in development mode if it does not bind -transport to an external interface (the default), and is otherwise in -production mode if it does bind transport to an external interface. - -Note that HTTP can be configured independently of transport via -<> and <>; -this can be useful for configuring a single instance to be reachable via -HTTP for testing purposes without triggering production mode. - -We recognize that some users need to bind transport to an external -interface for testing their usage of the transport client. For this -situation, we provide the discovery type `single-node` (configure it by -setting `discovery.type` to `single-node`); in this situation, a node -will elect itself master and will not form a cluster with any other -node. - -If you are running a single node in production, it is possible to evade -the bootstrap checks (either by not binding transport to an external -interface, or by binding transport to an external interface and setting -the discovery type to `single-node`). 
For this situation, you can force -execution of the bootstrap checks by setting the system property -`es.enforce.bootstrap.checks` to `true` (set this in <>, or -by adding `-Des.enforce.bootstrap.checks=true` to the environment -variable `ES_JAVA_OPTS`). We strongly encourage you to do this if you -are in this specific situation. This system property can be used to -force execution of the bootstrap checks independent of the node -configuration. +By default, Elasticsearch binds to `localhost` for <> and +<> communication. This is fine for +downloading and playing with Elasticsearch, and everyday development but it's +useless for production systems. To join a cluster, an Elasticsearch node must be +reachable via transport communication. To join a cluster over an external +network interface, a node must bind transport to an external interface and not +be using <>. Thus, we consider an +Elasticsearch node to be in development mode if it can not form a cluster with +another machine over an external network interface, and is otherwise in +production mode if it can join a cluster over an external interface. + +Note that HTTP and transport can be configured independently via +<> and <>; this +can be useful for configuring a single node to be reachable via HTTP for testing +purposes without triggering production mode. + +[[single-node-discovery]] +[float] +=== Single-node discovery +We recognize that some users need to bind transport to an external interface for +testing their usage of the transport client. For this situation, we provide the +discovery type `single-node` (configure it by setting `discovery.type` to +`single-node`); in this situation, a node will elect itself master and will not +join a cluster with any other node. 
+ + +[float] +=== Forcing the bootstrap checks +If you are running a single node in production, it is possible to evade the +bootstrap checks (either by not binding transport to an external interface, or +by binding transport to an external interface and setting the discovery type to +`single-node`). For this situation, you can force execution of the bootstrap +checks by setting the system property `es.enforce.bootstrap.checks` to `true` +(set this in <>, or by adding `-Des.enforce.bootstrap.checks=true` +to the environment variable `ES_JAVA_OPTS`). We strongly encourage you to do +this if you are in this specific situation. This system property can be used to +force execution of the bootstrap checks independent of the node configuration. === Heap size check diff --git a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java index 8f3df14659a95..17dc2740ee426 100644 --- a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java +++ b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java @@ -108,7 +108,7 @@ protected Class loadClass(String name, boolean resolve) throws ClassNotFoundE ExecutableScript.Factory factory = (p) -> new ExpressionExecutableScript(expr, p); return context.factoryClazz.cast(factory); } - throw new IllegalArgumentException("painless does not know how to handle context [" + context.name + "]"); + throw new IllegalArgumentException("expression engine does not know how to handle script context [" + context.name + "]"); } private SearchScript.LeafFactory newSearchScript(Expression expr, SearchLookup lookup, @Nullable Map vars) { diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentChildInnerHitContextBuilder.java 
b/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentChildInnerHitContextBuilder.java index c60babf5b2dc2..dd1009d775223 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentChildInnerHitContextBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentChildInnerHitContextBuilder.java @@ -70,7 +70,7 @@ class ParentChildInnerHitContextBuilder extends InnerHitContextBuilder { } @Override - public void build(SearchContext parentSearchContext, InnerHitsContext innerHitsContext) throws IOException { + protected void doBuild(SearchContext parentSearchContext, InnerHitsContext innerHitsContext) throws IOException { if (parentSearchContext.mapperService().getIndexSettings().isSingleType()) { handleJoinFieldInnerHits(parentSearchContext, innerHitsContext); } else { diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/InnerHitsIT.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/InnerHitsIT.java index d9c8aaddc78e1..6efd5256e5422 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/InnerHitsIT.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/InnerHitsIT.java @@ -22,7 +22,9 @@ import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.util.ArrayUtil; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.InnerHitBuilder; import org.elasticsearch.index.query.QueryBuilder; @@ -506,13 +508,16 @@ public void testMatchesQueriesParentChildInnerHits() throws Exception { assertThat(response.getHits().getAt(0).getInnerHits().get("child").getAt(0).getMatchedQueries()[0], equalTo("_name2")); } - public void testDontExplode() throws Exception { + 
public void testUseMaxDocInsteadOfSize() throws Exception { if (legacy()) { assertAcked(prepareCreate("index1").addMapping("child", "_parent", "type=parent")); } else { assertAcked(prepareCreate("index1") .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); } + client().admin().indices().prepareUpdateSettings("index1") + .setSettings(Collections.singletonMap(IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING.getKey(), ArrayUtil.MAX_ARRAY_LENGTH)) + .get(); List requests = new ArrayList<>(); requests.add(createIndexRequest("index1", "parent", "1", null)); requests.add(createIndexRequest("index1", "child", "2", "1", "field", "value1")); @@ -585,4 +590,56 @@ public void testInnerHitsWithIgnoreUnmapped() throws Exception { assertHitCount(response, 2); assertSearchHits(response, "1", "3"); } + + public void testTooHighResultWindow() throws Exception { + if (legacy()) { + assertAcked(prepareCreate("index1") + .addMapping("parent_type", "nested_type", "type=nested") + .addMapping("child_type", "_parent", "type=parent_type") + ); + } else { + assertAcked(prepareCreate("index1") + .addMapping("doc", addFieldMappings( + buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent_type", "child_type"), + "nested_type", "nested")) + ); + } + createIndexRequest("index1", "parent_type", "1", null, "nested_type", Collections.singletonMap("key", "value")).get(); + createIndexRequest("index1", "child_type", "2", "1").get(); + refresh(); + + SearchResponse response = client().prepareSearch("index1") + .setQuery(hasChildQuery("child_type", matchAllQuery(), ScoreMode.None).ignoreUnmapped(true) + .innerHit(new InnerHitBuilder().setFrom(50).setSize(10).setName("_name"))) + .get(); + assertNoFailures(response); + assertHitCount(response, 1); + + Exception e = expectThrows(SearchPhaseExecutionException.class, () -> client().prepareSearch("index1") + .setQuery(hasChildQuery("child_type", matchAllQuery(), 
ScoreMode.None).ignoreUnmapped(true) + .innerHit(new InnerHitBuilder().setFrom(100).setSize(10).setName("_name"))) + .get()); + assertThat(e.getCause().getMessage(), + containsString("the inner hit definition's [_name]'s from + size must be less than or equal to: [100] but was [110]")); + e = expectThrows(SearchPhaseExecutionException.class, () -> client().prepareSearch("index1") + .setQuery(hasChildQuery("child_type", matchAllQuery(), ScoreMode.None).ignoreUnmapped(true) + .innerHit(new InnerHitBuilder().setFrom(10).setSize(100).setName("_name"))) + .get()); + assertThat(e.getCause().getMessage(), + containsString("the inner hit definition's [_name]'s from + size must be less than or equal to: [100] but was [110]")); + + client().admin().indices().prepareUpdateSettings("index1") + .setSettings(Collections.singletonMap(IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING.getKey(), 110)) + .get(); + response = client().prepareSearch("index1") + .setQuery(hasChildQuery("child_type", matchAllQuery(), ScoreMode.None).ignoreUnmapped(true) + .innerHit(new InnerHitBuilder().setFrom(100).setSize(10).setName("_name"))) + .get(); + assertNoFailures(response); + response = client().prepareSearch("index1") + .setQuery(hasChildQuery("child_type", matchAllQuery(), ScoreMode.None).ignoreUnmapped(true) + .innerHit(new InnerHitBuilder().setFrom(10).setSize(100).setName("_name"))) + .get(); + assertNoFailures(response); + } } diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQuery.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQuery.java index a7ca013ec22b9..910c716db6934 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQuery.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQuery.java @@ -38,6 +38,7 @@ import org.elasticsearch.common.lucene.Lucene; import java.io.IOException; +import java.util.List; import java.util.Objects; import java.util.Set; @@ -46,15 
+47,17 @@ final class PercolateQuery extends Query implements Accountable { // cost of matching the query against the document, arbitrary as it would be really complex to estimate private static final float MATCH_COST = 1000; + private final String name; private final QueryStore queryStore; - private final BytesReference documentSource; + private final List documents; private final Query candidateMatchesQuery; private final Query verifiedMatchesQuery; private final IndexSearcher percolatorIndexSearcher; - PercolateQuery(QueryStore queryStore, BytesReference documentSource, + PercolateQuery(String name, QueryStore queryStore, List documents, Query candidateMatchesQuery, IndexSearcher percolatorIndexSearcher, Query verifiedMatchesQuery) { - this.documentSource = Objects.requireNonNull(documentSource); + this.name = name; + this.documents = Objects.requireNonNull(documents); this.candidateMatchesQuery = Objects.requireNonNull(candidateMatchesQuery); this.queryStore = Objects.requireNonNull(queryStore); this.percolatorIndexSearcher = Objects.requireNonNull(percolatorIndexSearcher); @@ -65,7 +68,7 @@ final class PercolateQuery extends Query implements Accountable { public Query rewrite(IndexReader reader) throws IOException { Query rewritten = candidateMatchesQuery.rewrite(reader); if (rewritten != candidateMatchesQuery) { - return new PercolateQuery(queryStore, documentSource, rewritten, percolatorIndexSearcher, verifiedMatchesQuery); + return new PercolateQuery(name, queryStore, documents, rewritten, percolatorIndexSearcher, verifiedMatchesQuery); } else { return this; } @@ -164,12 +167,16 @@ boolean matchDocId(int docId) throws IOException { }; } + String getName() { + return name; + } + IndexSearcher getPercolatorIndexSearcher() { return percolatorIndexSearcher; } - BytesReference getDocumentSource() { - return documentSource; + List getDocuments() { + return documents; } QueryStore getQueryStore() { @@ -193,13 +200,22 @@ public int hashCode() { @Override public 
String toString(String s) { - return "PercolateQuery{document_source={" + documentSource.utf8ToString() + "},inner={" + + StringBuilder sources = new StringBuilder(); + for (BytesReference document : documents) { + sources.append(document.utf8ToString()); + sources.append('\n'); + } + return "PercolateQuery{document_sources={" + sources + "},inner={" + candidateMatchesQuery.toString(s) + "}}"; } @Override public long ramBytesUsed() { - return documentSource.ramBytesUsed(); + long ramUsed = 0L; + for (BytesReference document : documents) { + ramUsed += document.ramBytesUsed(); + } + return ramUsed; } @FunctionalInterface diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java index 30da327c81f32..337b7ee2f36b5 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java @@ -57,9 +57,11 @@ import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.search.Queries; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.analysis.FieldNameAnalyzer; @@ -69,6 +71,7 @@ import org.elasticsearch.index.mapper.DocumentMapperForType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParsedDocument; import 
org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; @@ -81,7 +84,10 @@ import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; +import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; +import java.util.List; import java.util.Objects; import java.util.function.Supplier; @@ -94,6 +100,8 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder documents; private final XContentType documentXContentType; private final String indexedDocumentIndex; @@ -123,7 +132,7 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder documents, XContentType documentXContentType) { + this(field, null, documents, documentXContentType); } @Deprecated - public PercolateQueryBuilder(String field, String documentType, BytesReference document, XContentType documentXContentType) { + public PercolateQueryBuilder(String field, String documentType, List documents, XContentType documentXContentType) { if (field == null) { throw new IllegalArgumentException("[field] is a required argument"); } - if (document == null) { + if (documents == null) { throw new IllegalArgumentException("[document] is a required argument"); } this.field = field; this.documentType = documentType; - this.document = document; + this.documents = documents; this.documentXContentType = Objects.requireNonNull(documentXContentType); indexedDocumentIndex = null; indexedDocumentType = null; @@ -164,7 +184,7 @@ private PercolateQueryBuilder(String field, String documentType, Supplier_percolator_document_slot response field + * when multiple percolate queries have been specified in the main query. 
+ */ + public PercolateQueryBuilder setName(String name) { + this.name = name; + return this; + } + @Override protected void doWriteTo(StreamOutput out) throws IOException { if (documentSupplier != null) { throw new IllegalStateException("supplier must be null, can't serialize suppliers, missing a rewriteAndFetch?"); } out.writeString(field); + if (out.getVersion().onOrAfter(Version.V_6_1_0)) { + out.writeOptionalString(name); + } if (out.getVersion().before(Version.V_6_0_0_beta1)) { out.writeString(documentType); } else { @@ -277,8 +317,19 @@ protected void doWriteTo(StreamOutput out) throws IOException { } else { out.writeBoolean(false); } - out.writeOptionalBytesReference(document); - if (document != null && out.getVersion().onOrAfter(Version.V_5_3_0)) { + if (out.getVersion().onOrAfter(Version.V_6_1_0)) { + out.writeVInt(documents.size()); + for (BytesReference document : documents) { + out.writeBytesReference(document); + } + } else { + if (documents.size() > 1) { + throw new IllegalArgumentException("Nodes prior to 6.1.0 cannot accept multiple documents"); + } + BytesReference doc = documents.isEmpty() ? 
null : documents.iterator().next(); + out.writeOptionalBytesReference(doc); + } + if (documents.isEmpty() == false && out.getVersion().onOrAfter(Version.V_5_3_0)) { documentXContentType.writeTo(out); } } @@ -288,8 +339,18 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep builder.startObject(NAME); builder.field(DOCUMENT_TYPE_FIELD.getPreferredName(), documentType); builder.field(QUERY_FIELD.getPreferredName(), field); - if (document != null) { - builder.rawField(DOCUMENT_FIELD.getPreferredName(), document); + if (name != null) { + builder.field(NAME_FIELD.getPreferredName(), name); + } + if (documents.isEmpty() == false) { + builder.startArray(DOCUMENTS_FIELD.getPreferredName()); + for (BytesReference document : documents) { + try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, document)) { + parser.nextToken(); + XContentHelper.copyCurrentStructure(builder.generator(), parser); + } + } + builder.endArray(); } if (indexedDocumentIndex != null || indexedDocumentType != null || indexedDocumentId != null) { if (indexedDocumentIndex != null) { @@ -319,6 +380,7 @@ public static PercolateQueryBuilder fromXContent(XContentParser parser) throws I float boost = AbstractQueryBuilder.DEFAULT_BOOST; String field = null; + String name = null; String documentType = null; String indexedDocumentIndex = null; @@ -328,29 +390,62 @@ public static PercolateQueryBuilder fromXContent(XContentParser parser) throws I String indexedDocumentPreference = null; Long indexedDocumentVersion = null; - BytesReference source = null; + List documents = new ArrayList<>(); String queryName = null; String currentFieldName = null; + boolean documentsSpecified = false; + boolean documentSpecified = false; + XContentParser.Token token; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (token == 
XContentParser.Token.START_ARRAY) { + if (DOCUMENTS_FIELD.match(currentFieldName)) { + if (documentSpecified) { + throw new IllegalArgumentException("[" + PercolateQueryBuilder.NAME + + "] Either specified [document] or [documents], not both"); + } + documentsSpecified = true; + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + if (token == XContentParser.Token.START_OBJECT) { + try (XContentBuilder builder = XContentFactory.jsonBuilder()) { + builder.copyCurrentStructure(parser); + builder.flush(); + documents.add(builder.bytes()); + } + } else { + throw new ParsingException(parser.getTokenLocation(), "[" + PercolateQueryBuilder.NAME + + "] query does not support [" + token + "]"); + } + } + } else { + throw new ParsingException(parser.getTokenLocation(), "[" + PercolateQueryBuilder.NAME + + "] query does not field name [" + currentFieldName + "]"); + } } else if (token == XContentParser.Token.START_OBJECT) { if (DOCUMENT_FIELD.match(currentFieldName)) { + if (documentsSpecified) { + throw new IllegalArgumentException("[" + PercolateQueryBuilder.NAME + + "] Either specified [document] or [documents], not both"); + } + documentSpecified = true; try (XContentBuilder builder = XContentFactory.jsonBuilder()) { builder.copyCurrentStructure(parser); builder.flush(); - source = builder.bytes(); + documents.add(builder.bytes()); } } else { throw new ParsingException(parser.getTokenLocation(), "[" + PercolateQueryBuilder.NAME + - "] query does not support [" + token + "]"); + "] query does not support field name [" + currentFieldName + "]"); } } else if (token.isValue() || token == XContentParser.Token.VALUE_NULL) { if (QUERY_FIELD.match(currentFieldName)) { field = parser.text(); + } else if (NAME_FIELD.match(currentFieldName)) { + name = parser.textOrNull(); } else if (DOCUMENT_TYPE_FIELD.match(currentFieldName)) { documentType = parser.textOrNull(); } else if (INDEXED_DOCUMENT_FIELD_INDEX.match(currentFieldName)) { @@ -380,14 +475,17 @@ public 
static PercolateQueryBuilder fromXContent(XContentParser parser) throws I } PercolateQueryBuilder queryBuilder; - if (source != null) { - queryBuilder = new PercolateQueryBuilder(field, documentType, source, XContentType.JSON); + if (documents.isEmpty() == false) { + queryBuilder = new PercolateQueryBuilder(field, documentType, documents, XContentType.JSON); } else if (indexedDocumentId != null) { queryBuilder = new PercolateQueryBuilder(field, documentType, indexedDocumentIndex, indexedDocumentType, indexedDocumentId, indexedDocumentRouting, indexedDocumentPreference, indexedDocumentVersion); } else { throw new IllegalArgumentException("[" + PercolateQueryBuilder.NAME + "] query, nothing to percolate"); } + if (name != null) { + queryBuilder.setName(name); + } queryBuilder.queryName(queryName); queryBuilder.boost(boost); return queryBuilder; @@ -397,7 +495,7 @@ public static PercolateQueryBuilder fromXContent(XContentParser parser) throws I protected boolean doEquals(PercolateQueryBuilder other) { return Objects.equals(field, other.field) && Objects.equals(documentType, other.documentType) - && Objects.equals(document, other.document) + && Objects.equals(documents, other.documents) && Objects.equals(indexedDocumentIndex, other.indexedDocumentIndex) && Objects.equals(indexedDocumentType, other.indexedDocumentType) && Objects.equals(documentSupplier, other.documentSupplier) @@ -407,7 +505,7 @@ protected boolean doEquals(PercolateQueryBuilder other) { @Override protected int doHashCode() { - return Objects.hash(field, documentType, document, indexedDocumentIndex, indexedDocumentType, indexedDocumentId, documentSupplier); + return Objects.hash(field, documentType, documents, indexedDocumentIndex, indexedDocumentType, indexedDocumentId, documentSupplier); } @Override @@ -417,14 +515,15 @@ public String getWriteableName() { @Override protected QueryBuilder doRewrite(QueryRewriteContext queryShardContext) { - if (document != null) { + if (documents.isEmpty() == false) { 
return this; } else if (documentSupplier != null) { final BytesReference source = documentSupplier.get(); if (source == null) { return this; // not executed yet } else { - return new PercolateQueryBuilder(field, documentType, source, XContentFactory.xContentType(source)); + return new PercolateQueryBuilder(field, documentType, Collections.singletonList(source), + XContentFactory.xContentType(source)); } } GetRequest getRequest = new GetRequest(indexedDocumentIndex, indexedDocumentType, indexedDocumentId); @@ -464,7 +563,7 @@ protected Query doToQuery(QueryShardContext context) throws IOException { throw new IllegalStateException("query builder must be rewritten first"); } - if (document == null) { + if (documents.isEmpty()) { throw new IllegalStateException("no document to percolate"); } @@ -478,7 +577,7 @@ protected Query doToQuery(QueryShardContext context) throws IOException { "] to be of type [percolator], but is of type [" + fieldType.typeName() + "]"); } - final ParsedDocument doc; + final List docs = new ArrayList<>(); final DocumentMapper docMapper; final MapperService mapperService = context.getMapperService(); if (context.getIndexSettings().isSingleType()) { @@ -495,14 +594,18 @@ protected Query doToQuery(QueryShardContext context) throws IOException { } } docMapper = mapperService.documentMapper(type); - doc = docMapper.parse(source(context.index().getName(), type, "_temp_id", document, documentXContentType)); + for (BytesReference document : documents) { + docs.add(docMapper.parse(source(context.index().getName(), type, "_temp_id", document, documentXContentType))); + } } else { if (documentType == null) { throw new IllegalArgumentException("[percolate] query is missing required [document_type] parameter"); } DocumentMapperForType docMapperForType = mapperService.documentMapperWithAutoCreate(documentType); docMapper = docMapperForType.getDocumentMapper(); - doc = docMapper.parse(source(context.index().getName(), documentType, "_temp_id", document, 
documentXContentType)); + for (BytesReference document : documents) { + docs.add(docMapper.parse(source(context.index().getName(), documentType, "_temp_id", document, documentXContentType))); + } } FieldNameAnalyzer fieldNameAnalyzer = (FieldNameAnalyzer) docMapper.mappers().indexAnalyzer(); @@ -520,22 +623,23 @@ protected Analyzer getWrappedAnalyzer(String fieldName) { } }; final IndexSearcher docSearcher; - if (doc.docs().size() > 1) { - assert docMapper.hasNestedObjects(); - docSearcher = createMultiDocumentSearcher(analyzer, doc); + if (docs.size() > 1 || docs.get(0).docs().size() > 1) { + assert docs.size() != 1 || docMapper.hasNestedObjects(); + docSearcher = createMultiDocumentSearcher(analyzer, docs); } else { - MemoryIndex memoryIndex = MemoryIndex.fromDocument(doc.rootDoc(), analyzer, true, false); + MemoryIndex memoryIndex = MemoryIndex.fromDocument(docs.get(0).rootDoc(), analyzer, true, false); docSearcher = memoryIndex.createSearcher(); docSearcher.setQueryCache(null); } - boolean mapUnmappedFieldsAsString = context.getIndexSettings() - .getValue(PercolatorFieldMapper.INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING); + PercolatorFieldMapper percolatorFieldMapper = (PercolatorFieldMapper) docMapper.mappers().getMapper(field); + boolean mapUnmappedFieldsAsString = percolatorFieldMapper.isMapUnmappedFieldAsText(); QueryShardContext percolateShardContext = wrap(context); + String name = this.name != null ? 
this.name : field; PercolatorFieldMapper.FieldType pft = (PercolatorFieldMapper.FieldType) fieldType; PercolateQuery.QueryStore queryStore = createStore(pft.queryBuilderField, percolateShardContext, mapUnmappedFieldsAsString); - return pft.percolateQuery(queryStore, document, docSearcher); + return pft.percolateQuery(name, queryStore, documents, docSearcher); } public String getField() { @@ -546,8 +650,8 @@ public String getDocumentType() { return documentType; } - public BytesReference getDocument() { - return document; + public List getDocuments() { + return documents; } //pkg-private for testing @@ -555,12 +659,17 @@ XContentType getXContentType() { return documentXContentType; } - static IndexSearcher createMultiDocumentSearcher(Analyzer analyzer, ParsedDocument doc) { + static IndexSearcher createMultiDocumentSearcher(Analyzer analyzer, Collection docs) { RAMDirectory ramDirectory = new RAMDirectory(); try (IndexWriter indexWriter = new IndexWriter(ramDirectory, new IndexWriterConfig(analyzer))) { - indexWriter.addDocuments(doc.docs()); - indexWriter.commit(); - DirectoryReader directoryReader = DirectoryReader.open(ramDirectory); + // Indexing in order here, so that the user provided order matches with the docid sequencing: + Iterable iterable = () -> docs.stream() + .map(ParsedDocument::docs) + .flatMap(Collection::stream) + .iterator(); + indexWriter.addDocuments(iterable); + + DirectoryReader directoryReader = DirectoryReader.open(indexWriter); assert directoryReader.leaves().size() == 1 : "Expected single leaf, but got [" + directoryReader.leaves().size() + "]"; final IndexSearcher slowSearcher = new IndexSearcher(directoryReader) { diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java index ee8c7ff44fbe6..e7b5eca24ad48 100644 --- 
a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java @@ -45,6 +45,8 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.hash.MurmurHash3; import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -91,9 +93,13 @@ public class PercolatorFieldMapper extends FieldMapper { static final XContentType QUERY_BUILDER_CONTENT_TYPE = XContentType.SMILE; - static final Setting INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING = - Setting.boolSetting("index.percolator.map_unmapped_fields_as_string", false, Setting.Property.IndexScope); + @Deprecated + static final Setting INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING = Setting.boolSetting( + "index.percolator.map_unmapped_fields_as_string", false, Setting.Property.IndexScope, Setting.Property.Deprecated); + static final Setting INDEX_MAP_UNMAPPED_FIELDS_AS_TEXT_SETTING = Setting.boolSetting( + "index.percolator.map_unmapped_fields_as_text", false, Setting.Property.IndexScope); static final String CONTENT_TYPE = "percolator"; + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(PercolatorFieldMapper.class)); private static final FieldType FIELD_TYPE = new FieldType(); static final byte FIELD_VALUE_SEPARATOR = 0; // nul code point @@ -229,7 +235,7 @@ public Query termQuery(Object value, QueryShardContext context) { throw new QueryShardException(context, "Percolator fields are not searchable directly, use a percolate query instead"); } - Query percolateQuery(PercolateQuery.QueryStore queryStore, BytesReference documentSource, + Query 
percolateQuery(String name, PercolateQuery.QueryStore queryStore, List documents, IndexSearcher searcher) throws IOException { IndexReader indexReader = searcher.getIndexReader(); Query candidateMatchesQuery = createCandidateQuery(indexReader); @@ -241,9 +247,9 @@ Query percolateQuery(PercolateQuery.QueryStore queryStore, BytesReference docume if (indexReader.maxDoc() == 1) { verifiedMatchesQuery = new TermQuery(new Term(extractionResultField.name(), EXTRACTION_COMPLETE)); } else { - verifiedMatchesQuery = new MatchNoDocsQuery("nested docs, so no verified matches"); + verifiedMatchesQuery = new MatchNoDocsQuery("multiple/nested docs, so no verified matches"); } - return new PercolateQuery(queryStore, documentSource, candidateMatchesQuery, searcher, verifiedMatchesQuery); + return new PercolateQuery(name, queryStore, documents, candidateMatchesQuery, searcher, verifiedMatchesQuery); } Query createCandidateQuery(IndexReader indexReader) throws IOException { @@ -295,7 +301,7 @@ Query createCandidateQuery(IndexReader indexReader) throws IOException { } - private final boolean mapUnmappedFieldAsString; + private final boolean mapUnmappedFieldAsText; private final Supplier queryShardContext; private KeywordFieldMapper queryTermsField; private KeywordFieldMapper extractionResultField; @@ -315,11 +321,28 @@ Query createCandidateQuery(IndexReader indexReader) throws IOException { this.queryTermsField = queryTermsField; this.extractionResultField = extractionResultField; this.queryBuilderField = queryBuilderField; - this.mapUnmappedFieldAsString = INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING.get(indexSettings); + this.mapUnmappedFieldAsText = getMapUnmappedFieldAsText(indexSettings); this.rangeFieldMapper = rangeFieldMapper; this.boostFields = boostFields; } + private static boolean getMapUnmappedFieldAsText(Settings indexSettings) { + if (INDEX_MAP_UNMAPPED_FIELDS_AS_TEXT_SETTING.exists(indexSettings) && + INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING.exists(indexSettings)) { 
+ throw new IllegalArgumentException("Either specify [" + INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING.getKey() + + "] or [" + INDEX_MAP_UNMAPPED_FIELDS_AS_TEXT_SETTING.getKey() + "] setting, not both"); + } + + if (INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING.exists(indexSettings)) { + DEPRECATION_LOGGER.deprecatedAndMaybeLog(INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING.getKey(), + "The [" + INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING.getKey() + + "] setting is deprecated in favour for the [" + INDEX_MAP_UNMAPPED_FIELDS_AS_TEXT_SETTING.getKey() + "] setting"); + return INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING.get(indexSettings); + } else { + return INDEX_MAP_UNMAPPED_FIELDS_AS_TEXT_SETTING.get(indexSettings); + } + } + @Override public FieldMapper updateFieldType(Map fullNameToFieldType) { PercolatorFieldMapper updated = (PercolatorFieldMapper) super.updateFieldType(fullNameToFieldType); @@ -364,7 +387,7 @@ public Mapper parse(ParseContext context) throws IOException { Version indexVersion = context.mapperService().getIndexSettings().getIndexVersionCreated(); createQueryBuilderField(indexVersion, queryBuilderField, queryBuilder, context); - Query query = toQuery(queryShardContext, mapUnmappedFieldAsString, queryBuilder); + Query query = toQuery(queryShardContext, mapUnmappedFieldAsText, queryBuilder); processQuery(query, context); return null; } @@ -487,6 +510,10 @@ protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, } } + boolean isMapUnmappedFieldAsText() { + return mapUnmappedFieldAsText; + } + /** * Fails if a percolator contains an unsupported query. 
The following queries are not supported: * 1) a has_child query diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhase.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhase.java index 5d26993c0341c..a0f3c006290d0 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhase.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhase.java @@ -29,12 +29,14 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.text.Text; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.fetch.FetchSubPhase; +import org.elasticsearch.search.fetch.subphase.highlight.HighlightField; import org.elasticsearch.search.fetch.subphase.highlight.HighlightPhase; import org.elasticsearch.search.fetch.subphase.highlight.Highlighter; import org.elasticsearch.search.fetch.subphase.highlight.SearchContextHighlight; @@ -42,6 +44,7 @@ import org.elasticsearch.search.internal.SubSearchContext; import java.io.IOException; +import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Map; @@ -58,68 +61,103 @@ final class PercolatorHighlightSubFetchPhase extends HighlightPhase { boolean hitsExecutionNeeded(SearchContext context) { // for testing - return context.highlight() != null && locatePercolatorQuery(context.query()) != null; + return context.highlight() != null && locatePercolatorQuery(context.query()).isEmpty() == false; } @Override - public void hitsExecute(SearchContext context, SearchHit[] hits) { 
+ public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOException { if (hitsExecutionNeeded(context) == false) { return; } - PercolateQuery percolateQuery = locatePercolatorQuery(context.query()); - if (percolateQuery == null) { + List percolateQueries = locatePercolatorQuery(context.query()); + if (percolateQueries.isEmpty()) { // shouldn't happen as we checked for the existence of a percolator query in hitsExecutionNeeded(...) throw new IllegalStateException("couldn't locate percolator query"); } - List ctxs = context.searcher().getIndexReader().leaves(); - IndexSearcher percolatorIndexSearcher = percolateQuery.getPercolatorIndexSearcher(); - PercolateQuery.QueryStore queryStore = percolateQuery.getQueryStore(); + boolean singlePercolateQuery = percolateQueries.size() == 1; + for (PercolateQuery percolateQuery : percolateQueries) { + String fieldName = singlePercolateQuery ? PercolatorMatchedSlotSubFetchPhase.FIELD_NAME_PREFIX : + PercolatorMatchedSlotSubFetchPhase.FIELD_NAME_PREFIX + "_" + percolateQuery.getName(); + List ctxs = context.searcher().getIndexReader().leaves(); + IndexSearcher percolatorIndexSearcher = percolateQuery.getPercolatorIndexSearcher(); + PercolateQuery.QueryStore queryStore = percolateQuery.getQueryStore(); - LeafReaderContext percolatorLeafReaderContext = percolatorIndexSearcher.getIndexReader().leaves().get(0); - FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext(); - SubSearchContext subSearchContext = - createSubSearchContext(context, percolatorLeafReaderContext, percolateQuery.getDocumentSource()); + LeafReaderContext percolatorLeafReaderContext = percolatorIndexSearcher.getIndexReader().leaves().get(0); + FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext(); - for (SearchHit hit : hits) { - final Query query; - try { + for (SearchHit hit : hits) { LeafReaderContext ctx = ctxs.get(ReaderUtil.subIndex(hit.docId(), ctxs)); int segmentDocId = hit.docId() - ctx.docBase; - query = 
queryStore.getQueries(ctx).apply(segmentDocId); - } catch (IOException e) { - throw new RuntimeException(e); - } - if (query != null) { - subSearchContext.parsedQuery(new ParsedQuery(query)); - hitContext.reset( - new SearchHit(0, "unknown", new Text(hit.getType()), Collections.emptyMap()), - percolatorLeafReaderContext, 0, percolatorIndexSearcher - ); - hitContext.cache().clear(); - super.hitExecute(subSearchContext, hitContext); - hit.getHighlightFields().putAll(hitContext.hit().getHighlightFields()); + final Query query = queryStore.getQueries(ctx).apply(segmentDocId); + if (query != null) { + DocumentField field = hit.field(fieldName); + if (field == null) { + // It possible that a hit did not match with a particular percolate query, + // so then continue highlighting with the next hit. + continue; + } + + for (Object matchedSlot : field.getValues()) { + int slot = (int) matchedSlot; + BytesReference document = percolateQuery.getDocuments().get(slot); + SubSearchContext subSearchContext = + createSubSearchContext(context, percolatorLeafReaderContext, document, slot); + subSearchContext.parsedQuery(new ParsedQuery(query)); + hitContext.reset( + new SearchHit(slot, "unknown", new Text(hit.getType()), Collections.emptyMap()), + percolatorLeafReaderContext, slot, percolatorIndexSearcher + ); + hitContext.cache().clear(); + super.hitExecute(subSearchContext, hitContext); + for (Map.Entry entry : hitContext.hit().getHighlightFields().entrySet()) { + if (percolateQuery.getDocuments().size() == 1) { + String hlFieldName; + if (singlePercolateQuery) { + hlFieldName = entry.getKey(); + } else { + hlFieldName = percolateQuery.getName() + "_" + entry.getKey(); + } + hit.getHighlightFields().put(hlFieldName, new HighlightField(hlFieldName, entry.getValue().fragments())); + } else { + // In case multiple documents are being percolated we need to identify to which document + // a highlight belongs to. 
+ String hlFieldName; + if (singlePercolateQuery) { + hlFieldName = slot + "_" + entry.getKey(); + } else { + hlFieldName = percolateQuery.getName() + "_" + slot + "_" + entry.getKey(); + } + hit.getHighlightFields().put(hlFieldName, new HighlightField(hlFieldName, entry.getValue().fragments())); + } + } + } + } } } } - static PercolateQuery locatePercolatorQuery(Query query) { + static List locatePercolatorQuery(Query query) { if (query instanceof PercolateQuery) { - return (PercolateQuery) query; + return Collections.singletonList((PercolateQuery) query); } else if (query instanceof BooleanQuery) { + List percolateQueries = new ArrayList<>(); for (BooleanClause clause : ((BooleanQuery) query).clauses()) { - PercolateQuery result = locatePercolatorQuery(clause.getQuery()); - if (result != null) { - return result; + List result = locatePercolatorQuery(clause.getQuery()); + if (result.isEmpty() == false) { + percolateQueries.addAll(result); } } + return percolateQueries; } else if (query instanceof DisjunctionMaxQuery) { + List percolateQueries = new ArrayList<>(); for (Query disjunct : ((DisjunctionMaxQuery) query).getDisjuncts()) { - PercolateQuery result = locatePercolatorQuery(disjunct); - if (result != null) { - return result; + List result = locatePercolatorQuery(disjunct); + if (result.isEmpty() == false) { + percolateQueries.addAll(result); } } + return percolateQueries; } else if (query instanceof ConstantScoreQuery) { return locatePercolatorQuery(((ConstantScoreQuery) query).getQuery()); } else if (query instanceof BoostQuery) { @@ -127,16 +165,16 @@ static PercolateQuery locatePercolatorQuery(Query query) { } else if (query instanceof FunctionScoreQuery) { return locatePercolatorQuery(((FunctionScoreQuery) query).getSubQuery()); } - - return null; + return Collections.emptyList(); } - private SubSearchContext createSubSearchContext(SearchContext context, LeafReaderContext leafReaderContext, BytesReference source) { + private SubSearchContext 
createSubSearchContext(SearchContext context, LeafReaderContext leafReaderContext, + BytesReference source, int docId) { SubSearchContext subSearchContext = new SubSearchContext(context); subSearchContext.highlight(new SearchContextHighlight(context.highlight().fields())); // Enforce highlighting by source, because MemoryIndex doesn't support stored fields. subSearchContext.highlight().globalForceSource(true); - subSearchContext.lookup().source().setSegmentAndDocument(leafReaderContext, 0); + subSearchContext.lookup().source().setSegmentAndDocument(leafReaderContext, docId); subSearchContext.lookup().source().setSource(source); return subSearchContext; } diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhase.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhase.java new file mode 100644 index 0000000000000..163c4183dd48e --- /dev/null +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhase.java @@ -0,0 +1,121 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.percolator; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.ReaderUtil; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.Weight; +import org.apache.lucene.util.BitSet; +import org.apache.lucene.util.BitSetIterator; +import org.elasticsearch.common.document.DocumentField; +import org.elasticsearch.common.lucene.search.Queries; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.fetch.FetchSubPhase; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS; +import static org.elasticsearch.percolator.PercolatorHighlightSubFetchPhase.locatePercolatorQuery; + +/** + * Adds a special field to the a percolator query hit to indicate which documents matched with the percolator query. + * This is useful when multiple documents are being percolated in a single request. + */ +final class PercolatorMatchedSlotSubFetchPhase implements FetchSubPhase { + + static final String FIELD_NAME_PREFIX = "_percolator_document_slot"; + + @Override + public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOException { + List percolateQueries = locatePercolatorQuery(context.query()); + if (percolateQueries.isEmpty()) { + return; + } + + boolean singlePercolateQuery = percolateQueries.size() == 1; + for (PercolateQuery percolateQuery : percolateQueries) { + String fieldName = singlePercolateQuery ? 
FIELD_NAME_PREFIX : FIELD_NAME_PREFIX + "_" + percolateQuery.getName(); + IndexSearcher percolatorIndexSearcher = percolateQuery.getPercolatorIndexSearcher(); + Weight weight = percolatorIndexSearcher.createNormalizedWeight(Queries.newNonNestedFilter(), false); + Scorer s = weight.scorer(percolatorIndexSearcher.getIndexReader().leaves().get(0)); + int memoryIndexMaxDoc = percolatorIndexSearcher.getIndexReader().maxDoc(); + BitSet rootDocs = BitSet.of(s.iterator(), memoryIndexMaxDoc); + int[] rootDocsBySlot = null; + boolean hasNestedDocs = rootDocs.cardinality() != percolatorIndexSearcher.getIndexReader().numDocs(); + if (hasNestedDocs) { + rootDocsBySlot = buildRootDocsSlots(rootDocs); + } + + PercolateQuery.QueryStore queryStore = percolateQuery.getQueryStore(); + List ctxs = context.searcher().getIndexReader().leaves(); + for (SearchHit hit : hits) { + LeafReaderContext ctx = ctxs.get(ReaderUtil.subIndex(hit.docId(), ctxs)); + int segmentDocId = hit.docId() - ctx.docBase; + Query query = queryStore.getQueries(ctx).apply(segmentDocId); + + TopDocs topDocs = percolatorIndexSearcher.search(query, memoryIndexMaxDoc, new Sort(SortField.FIELD_DOC)); + if (topDocs.totalHits == 0) { + // This hit didn't match with a percolate query, + // likely to happen when percolating multiple documents + continue; + } + + Map fields = hit.fieldsOrNull(); + if (fields == null) { + fields = new HashMap<>(); + hit.fields(fields); + } + IntStream slots = convertTopDocsToSlots(topDocs, rootDocsBySlot); + fields.put(fieldName, new DocumentField(fieldName, slots.boxed().collect(Collectors.toList()))); + } + } + } + + static IntStream convertTopDocsToSlots(TopDocs topDocs, int[] rootDocsBySlot) { + IntStream stream = Arrays.stream(topDocs.scoreDocs) + .mapToInt(scoreDoc -> scoreDoc.doc); + if (rootDocsBySlot != null) { + stream = stream.map(docId -> Arrays.binarySearch(rootDocsBySlot, docId)); + } + return stream; + } + + static int[] buildRootDocsSlots(BitSet rootDocs) { + int slot = 0; + 
int[] rootDocsBySlot = new int[rootDocs.cardinality()]; + BitSetIterator iterator = new BitSetIterator(rootDocs, 0); + for (int rootDocId = iterator.nextDoc(); rootDocId != NO_MORE_DOCS; rootDocId = iterator.nextDoc()) { + rootDocsBySlot[slot++] = rootDocId; + } + return rootDocsBySlot; + } +} diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorPlugin.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorPlugin.java index d09599a7af43c..7128060448cf1 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorPlugin.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorPlugin.java @@ -27,11 +27,12 @@ import org.elasticsearch.plugins.SearchPlugin; import org.elasticsearch.search.fetch.FetchSubPhase; -import java.util.Collections; +import java.util.Arrays; import java.util.List; import java.util.Map; import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; public class PercolatorPlugin extends Plugin implements MapperPlugin, SearchPlugin { @@ -48,17 +49,21 @@ public List> getQueries() { @Override public List getFetchSubPhases(FetchPhaseConstructionContext context) { - return singletonList(new PercolatorHighlightSubFetchPhase(settings, context.getHighlighters())); + return Arrays.asList( + new PercolatorMatchedSlotSubFetchPhase(), + new PercolatorHighlightSubFetchPhase(settings, context.getHighlighters()) + ); } @Override public List> getSettings() { - return Collections.singletonList(PercolatorFieldMapper.INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING); + return Arrays.asList(PercolatorFieldMapper.INDEX_MAP_UNMAPPED_FIELDS_AS_TEXT_SETTING, + PercolatorFieldMapper.INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING); } @Override public Map getMappers() { - return Collections.singletonMap(PercolatorFieldMapper.CONTENT_TYPE, new PercolatorFieldMapper.TypeParser()); + return singletonMap(PercolatorFieldMapper.CONTENT_TYPE, new 
PercolatorFieldMapper.TypeParser()); } } diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java index 77f937e680cf4..21b79cf29800d 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java @@ -355,8 +355,19 @@ private static BiFunction, Result> functionScoreQuery( private static BiFunction, Result> pointRangeQuery() { return (query, boosts) -> { PointRangeQuery pointRangeQuery = (PointRangeQuery) query; + if (pointRangeQuery.getNumDims() != 1) { + throw new UnsupportedQueryException(query); + } + byte[] lowerPoint = pointRangeQuery.getLowerPoint(); byte[] upperPoint = pointRangeQuery.getUpperPoint(); + + // Need to check whether upper is not smaller than lower, otherwise NumericUtils.subtract(...) fails IAE + // If upper is really smaller than lower then we deal with like MatchNoDocsQuery. (verified and no extractions) + if (new BytesRef(lowerPoint).compareTo(new BytesRef(upperPoint)) > 0) { + return new Result(true, Collections.emptySet()); + } + byte[] interval = new byte[16]; NumericUtils.subtract(16, 0, prepad(upperPoint), prepad(lowerPoint), interval); return new Result(false, Collections.singleton(new QueryExtraction( @@ -449,6 +460,12 @@ static Set selectBestExtraction(Map boostFields, if (onlyRangeBasedExtractions) { BytesRef extraction1SmallestRange = smallestRange(filtered1); BytesRef extraction2SmallestRange = smallestRange(filtered2); + if (extraction1SmallestRange == null) { + return extractions2; + } else if (extraction2SmallestRange == null) { + return extractions1; + } + // Keep the clause with smallest range, this is likely to be the rarest. 
if (extraction1SmallestRange.compareTo(extraction2SmallestRange) <= 0) { return extractions1; @@ -496,10 +513,10 @@ private static int minTermLength(Set extractions) { } private static BytesRef smallestRange(Set terms) { - BytesRef min = terms.iterator().next().range.interval; + BytesRef min = null; for (QueryExtraction qt : terms) { if (qt.range != null) { - if (qt.range.interval.compareTo(min) < 0) { + if (min == null || qt.range.interval.compareTo(min) < 0) { min = qt.range.interval; } } diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java index 2d78a0db63d23..61f3fd14f9533 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java @@ -309,7 +309,7 @@ public void testRangeQueries() throws Exception { MemoryIndex memoryIndex = MemoryIndex.fromDocument(Collections.singleton(new IntPoint("int_field", 3)), new WhitespaceAnalyzer()); IndexSearcher percolateSearcher = memoryIndex.createSearcher(); - Query query = fieldType.percolateQuery(queryStore, new BytesArray("{}"), percolateSearcher); + Query query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher); TopDocs topDocs = shardSearcher.search(query, 1); assertEquals(1L, topDocs.totalHits); assertEquals(1, topDocs.scoreDocs.length); @@ -317,7 +317,7 @@ public void testRangeQueries() throws Exception { memoryIndex = MemoryIndex.fromDocument(Collections.singleton(new LongPoint("long_field", 7L)), new WhitespaceAnalyzer()); percolateSearcher = memoryIndex.createSearcher(); - query = fieldType.percolateQuery(queryStore, new BytesArray("{}"), percolateSearcher); + query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher); topDocs = 
shardSearcher.search(query, 1); assertEquals(1L, topDocs.totalHits); assertEquals(1, topDocs.scoreDocs.length); @@ -326,7 +326,7 @@ public void testRangeQueries() throws Exception { memoryIndex = MemoryIndex.fromDocument(Collections.singleton(new HalfFloatPoint("half_float_field", 12)), new WhitespaceAnalyzer()); percolateSearcher = memoryIndex.createSearcher(); - query = fieldType.percolateQuery(queryStore, new BytesArray("{}"), percolateSearcher); + query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher); topDocs = shardSearcher.search(query, 1); assertEquals(1L, topDocs.totalHits); assertEquals(1, topDocs.scoreDocs.length); @@ -334,7 +334,7 @@ public void testRangeQueries() throws Exception { memoryIndex = MemoryIndex.fromDocument(Collections.singleton(new FloatPoint("float_field", 17)), new WhitespaceAnalyzer()); percolateSearcher = memoryIndex.createSearcher(); - query = fieldType.percolateQuery(queryStore, new BytesArray("{}"), percolateSearcher); + query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher); topDocs = shardSearcher.search(query, 1); assertEquals(1, topDocs.totalHits); assertEquals(1, topDocs.scoreDocs.length); @@ -342,7 +342,7 @@ public void testRangeQueries() throws Exception { memoryIndex = MemoryIndex.fromDocument(Collections.singleton(new DoublePoint("double_field", 21)), new WhitespaceAnalyzer()); percolateSearcher = memoryIndex.createSearcher(); - query = fieldType.percolateQuery(queryStore, new BytesArray("{}"), percolateSearcher); + query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher); topDocs = shardSearcher.search(query, 1); assertEquals(1, topDocs.totalHits); assertEquals(1, topDocs.scoreDocs.length); @@ -351,7 +351,7 @@ public void testRangeQueries() throws Exception { memoryIndex = MemoryIndex.fromDocument(Collections.singleton(new 
InetAddressPoint("ip_field", forString("192.168.0.4"))), new WhitespaceAnalyzer()); percolateSearcher = memoryIndex.createSearcher(); - query = fieldType.percolateQuery(queryStore, new BytesArray("{}"), percolateSearcher); + query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher); topDocs = shardSearcher.search(query, 1); assertEquals(1, topDocs.totalHits); assertEquals(1, topDocs.scoreDocs.length); @@ -464,7 +464,8 @@ public void testDuelRangeQueries() throws Exception { private void duelRun(PercolateQuery.QueryStore queryStore, MemoryIndex memoryIndex, IndexSearcher shardSearcher) throws IOException { boolean requireScore = randomBoolean(); IndexSearcher percolateSearcher = memoryIndex.createSearcher(); - Query percolateQuery = fieldType.percolateQuery(queryStore, new BytesArray("{}"), percolateSearcher); + Query percolateQuery = fieldType.percolateQuery("_name", queryStore, + Collections.singletonList(new BytesArray("{}")), percolateSearcher); Query query = requireScore ? 
percolateQuery : new ConstantScoreQuery(percolateQuery); TopDocs topDocs = shardSearcher.search(query, 10); @@ -497,7 +498,8 @@ private TopDocs executeQuery(PercolateQuery.QueryStore queryStore, MemoryIndex memoryIndex, IndexSearcher shardSearcher) throws IOException { IndexSearcher percolateSearcher = memoryIndex.createSearcher(); - Query percolateQuery = fieldType.percolateQuery(queryStore, new BytesArray("{}"), percolateSearcher); + Query percolateQuery = fieldType.percolateQuery("_name", queryStore, + Collections.singletonList(new BytesArray("{}")), percolateSearcher); return shardSearcher.search(percolateQuery, 10); } diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java index 30713d61abe07..655c0d508ec5f 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java @@ -35,6 +35,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; @@ -53,9 +54,13 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Base64; import java.util.Collection; import java.util.Collections; +import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; import static org.hamcrest.Matchers.equalTo; @@ -63,7 +68,10 @@ public class PercolateQueryBuilderTests extends AbstractQueryTestCase { - private static final String[] SHUFFLE_PROTECTED_FIELDS = new String[] { 
PercolateQueryBuilder.DOCUMENT_FIELD.getPreferredName()}; + private static final String[] SHUFFLE_PROTECTED_FIELDS = new String[] { + PercolateQueryBuilder.DOCUMENT_FIELD.getPreferredName(), + PercolateQueryBuilder.DOCUMENTS_FIELD.getPreferredName() + }; private static String queryField; private static String docType; @@ -74,7 +82,7 @@ public class PercolateQueryBuilderTests extends AbstractQueryTestCase documentSource; private boolean indexedDocumentExists = true; @@ -104,7 +112,18 @@ protected PercolateQueryBuilder doCreateTestQueryBuilder() { } private PercolateQueryBuilder doCreateTestQueryBuilder(boolean indexedDocument) { - documentSource = randomSource(); + if (indexedDocument) { + documentSource = Collections.singletonList(randomSource(new HashSet<>())); + } else { + int numDocs = randomIntBetween(1, 8); + documentSource = new ArrayList<>(numDocs); + Set usedFields = new HashSet<>(); + for (int i = 0; i < numDocs; i++) { + documentSource.add(randomSource(usedFields)); + } + } + + PercolateQueryBuilder queryBuilder; if (indexedDocument) { indexedDocumentIndex = randomAlphaOfLength(4); indexedDocumentType = "doc"; @@ -112,11 +131,15 @@ private PercolateQueryBuilder doCreateTestQueryBuilder(boolean indexedDocument) indexedDocumentRouting = randomAlphaOfLength(4); indexedDocumentPreference = randomAlphaOfLength(4); indexedDocumentVersion = (long) randomIntBetween(0, Integer.MAX_VALUE); - return new PercolateQueryBuilder(queryField, docType, indexedDocumentIndex, indexedDocumentType, indexedDocumentId, + queryBuilder = new PercolateQueryBuilder(queryField, docType, indexedDocumentIndex, indexedDocumentType, indexedDocumentId, indexedDocumentRouting, indexedDocumentPreference, indexedDocumentVersion); } else { - return new PercolateQueryBuilder(queryField, docType, documentSource, XContentType.JSON); + queryBuilder = new PercolateQueryBuilder(queryField, docType, documentSource, XContentType.JSON); } + if (randomBoolean()) { + 
queryBuilder.setName(randomAlphaOfLength(4)); + } + return queryBuilder; } /** @@ -139,8 +162,8 @@ protected GetResponse executeGet(GetRequest getRequest) { assertThat(getRequest.version(), Matchers.equalTo(indexedDocumentVersion)); if (indexedDocumentExists) { return new GetResponse( - new GetResult(indexedDocumentIndex, indexedDocumentType, indexedDocumentId, 0L, true, documentSource, - Collections.emptyMap()) + new GetResult(indexedDocumentIndex, indexedDocumentType, indexedDocumentId, 0L, true, + documentSource.iterator().next(), Collections.emptyMap()) ); } else { return new GetResponse( @@ -154,7 +177,7 @@ protected void doAssertLuceneQuery(PercolateQueryBuilder queryBuilder, Query que assertThat(query, Matchers.instanceOf(PercolateQuery.class)); PercolateQuery percolateQuery = (PercolateQuery) query; assertThat(docType, Matchers.equalTo(queryBuilder.getDocumentType())); - assertThat(percolateQuery.getDocumentSource(), Matchers.equalTo(documentSource)); + assertThat(percolateQuery.getDocuments(), Matchers.equalTo(documentSource)); } @Override @@ -181,12 +204,13 @@ public void testIndexedDocumentDoesNotExist() throws IOException { @Override protected Set getObjectsHoldingArbitraryContent() { //document contains arbitrary content, no error expected when an object is added to it - return Collections.singleton(PercolateQueryBuilder.DOCUMENT_FIELD.getPreferredName()); + return new HashSet<>(Arrays.asList(PercolateQueryBuilder.DOCUMENT_FIELD.getPreferredName(), + PercolateQueryBuilder.DOCUMENTS_FIELD.getPreferredName())); } public void testRequiredParameters() { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { - new PercolateQueryBuilder(null, null, new BytesArray("{}"), XContentType.JSON); + new PercolateQueryBuilder(null, new BytesArray("{}"), XContentType.JSON); }); assertThat(e.getMessage(), equalTo("[field] is a required argument")); @@ -227,16 +251,42 @@ public void testFromJsonNoDocumentType() throws IOException { } } - 
public void testCreateMultiDocumentSearcher() throws Exception { - int numDocs = randomIntBetween(2, 8); - List docs = new ArrayList<>(numDocs); - for (int i = 0; i < numDocs; i++) { + public void testBothDocumentAndDocumentsSpecified() throws IOException { + expectThrows(IllegalArgumentException.class, + () -> parseQuery("{\"percolate\" : { \"document\": {}, \"documents\": [{}, {}], \"field\":\"" + queryField + "\"}}")); + } + + public void testCreateNestedDocumentSearcher() throws Exception { + int numNestedDocs = randomIntBetween(2, 8); + List docs = new ArrayList<>(numNestedDocs); + for (int i = 0; i < numNestedDocs; i++) { docs.add(new ParseContext.Document()); } + Collection parsedDocument = Collections.singleton( + new ParsedDocument(null, null, "_id", "_type", null, docs, null, null, null)); Analyzer analyzer = new WhitespaceAnalyzer(); - ParsedDocument parsedDocument = new ParsedDocument(null, null, "_id", "_type", null, docs, null, null, null); IndexSearcher indexSearcher = PercolateQueryBuilder.createMultiDocumentSearcher(analyzer, parsedDocument); + assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(numNestedDocs)); + + // ensure that any query get modified so that the nested docs are never included as hits: + Query query = new MatchAllDocsQuery(); + BooleanQuery result = (BooleanQuery) indexSearcher.createNormalizedWeight(query, true).getQuery(); + assertThat(result.clauses().size(), equalTo(2)); + assertThat(result.clauses().get(0).getQuery(), sameInstance(query)); + assertThat(result.clauses().get(0).getOccur(), equalTo(BooleanClause.Occur.MUST)); + assertThat(result.clauses().get(1).getOccur(), equalTo(BooleanClause.Occur.MUST_NOT)); + } + + public void testCreateMultiDocumentSearcher() throws Exception { + int numDocs = randomIntBetween(2, 8); + List docs = new ArrayList<>(); + for (int i = 0; i < numDocs; i++) { + docs.add(new ParsedDocument(null, null, "_id", "_type", null, + Collections.singletonList(new ParseContext.Document()), null, 
null, null)); + } + Analyzer analyzer = new WhitespaceAnalyzer(); + IndexSearcher indexSearcher = PercolateQueryBuilder.createMultiDocumentSearcher(analyzer, docs); assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(numDocs)); // ensure that any query get modified so that the nested docs are never included as hits: @@ -248,10 +298,46 @@ public void testCreateMultiDocumentSearcher() throws Exception { assertThat(result.clauses().get(1).getOccur(), equalTo(BooleanClause.Occur.MUST_NOT)); } - private static BytesReference randomSource() { + public void testSerializationBwc() throws IOException { + final byte[] data = Base64.getDecoder().decode("P4AAAAAFZmllbGQEdHlwZQAAAAAAAA57ImZvbyI6ImJhciJ9AAAAAA=="); + final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_1, Version.V_5_0_2, + Version.V_5_1_1, Version.V_5_1_2, Version.V_5_2_0); + try (StreamInput in = StreamInput.wrap(data)) { + in.setVersion(version); + PercolateQueryBuilder queryBuilder = new PercolateQueryBuilder(in); + assertEquals("type", queryBuilder.getDocumentType()); + assertEquals("field", queryBuilder.getField()); + assertEquals("{\"foo\":\"bar\"}", queryBuilder.getDocuments().iterator().next().utf8ToString()); + assertEquals(XContentType.JSON, queryBuilder.getXContentType()); + + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.setVersion(version); + queryBuilder.writeTo(out); + assertArrayEquals(data, out.bytes().toBytesRef().bytes); + } + } + } + + private static BytesReference randomSource(Set usedFields) { try { + // If we create two source that have the same field, but these fields have different kind of values (str vs. lng) then + // when these source get indexed, indexing can fail. 
To solve this test issue, we should generate source that + // always have unique fields: + Map source; + boolean duplicateField; + do { + duplicateField = false; + source = RandomDocumentPicks.randomSource(random()); + for (String field : source.keySet()) { + if (usedFields.add(field) == false) { + duplicateField = true; + break; + } + } + } while (duplicateField); + XContentBuilder xContent = XContentFactory.jsonBuilder(); - xContent.map(RandomDocumentPicks.randomSource(random())); + xContent.map(source); return xContent.bytes(); } catch (IOException e) { throw new RuntimeException(e); diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryTests.java index c76ac14cffbb2..ac9cc97499ce6 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryTests.java @@ -116,7 +116,7 @@ public void testPercolateQuery() throws Exception { memoryIndex.addField("field", "the quick brown fox jumps over the lazy dog", new WhitespaceAnalyzer()); IndexSearcher percolateSearcher = memoryIndex.createSearcher(); // no scoring, wrapping it in a constant score query: - Query query = new ConstantScoreQuery(new PercolateQuery(queryStore, new BytesArray("a"), + Query query = new ConstantScoreQuery(new PercolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("a")), new TermQuery(new Term("select", "a")), percolateSearcher, new MatchNoDocsQuery(""))); TopDocs topDocs = shardSearcher.search(query, 10); assertThat(topDocs.totalHits, equalTo(1L)); @@ -126,7 +126,7 @@ public void testPercolateQuery() throws Exception { assertThat(explanation.isMatch(), is(true)); assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[0].score)); - query = new ConstantScoreQuery(new PercolateQuery(queryStore, new BytesArray("b"), + query = new 
ConstantScoreQuery(new PercolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("b")), new TermQuery(new Term("select", "b")), percolateSearcher, new MatchNoDocsQuery(""))); topDocs = shardSearcher.search(query, 10); assertThat(topDocs.totalHits, equalTo(3L)); @@ -146,13 +146,13 @@ public void testPercolateQuery() throws Exception { assertThat(explanation.isMatch(), is(true)); assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[2].score)); - query = new ConstantScoreQuery(new PercolateQuery(queryStore, new BytesArray("c"), + query = new ConstantScoreQuery(new PercolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("c")), new MatchAllDocsQuery(), percolateSearcher, new MatchAllDocsQuery())); topDocs = shardSearcher.search(query, 10); assertThat(topDocs.totalHits, equalTo(4L)); - query = new PercolateQuery(queryStore, new BytesArray("{}"), new TermQuery(new Term("select", "b")), - percolateSearcher, new MatchNoDocsQuery("")); + query = new PercolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), + new TermQuery(new Term("select", "b")), percolateSearcher, new MatchNoDocsQuery("")); topDocs = shardSearcher.search(query, 10); assertThat(topDocs.totalHits, equalTo(3L)); assertThat(topDocs.scoreDocs.length, equalTo(3)); diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java index 0f6b60354e690..441278d23f87a 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java @@ -195,10 +195,10 @@ public void testExtractRanges() throws Exception { addQueryFieldMappings(); BooleanQuery.Builder bq = new BooleanQuery.Builder(); Query rangeQuery1 = 
mapperService.documentMapper("doc").mappers().getMapper("number_field1").fieldType() - .rangeQuery(10, 20, true, true, null); + .rangeQuery(10, 20, true, true, null, null, null, null); bq.add(rangeQuery1, Occur.MUST); Query rangeQuery2 = mapperService.documentMapper("doc").mappers().getMapper("number_field1").fieldType() - .rangeQuery(15, 20, true, true, null); + .rangeQuery(15, 20, true, true, null, null, null, null); bq.add(rangeQuery2, Occur.MUST); DocumentMapper documentMapper = mapperService.documentMapper("doc"); diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhaseTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhaseTests.java index fb2a5f8bdc16f..f1b89d92ab11e 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhaseTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhaseTests.java @@ -38,16 +38,15 @@ import java.util.Collections; import static java.util.Collections.emptyMap; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.sameInstance; public class PercolatorHighlightSubFetchPhaseTests extends ESTestCase { public void testHitsExecutionNeeded() { - PercolateQuery percolateQuery = new PercolateQuery( - ctx -> null, new BytesArray("{}"), new MatchAllDocsQuery(), Mockito.mock(IndexSearcher.class), new MatchAllDocsQuery() - ); + PercolateQuery percolateQuery = new PercolateQuery("_name", ctx -> null, Collections.singletonList(new BytesArray("{}")), + new MatchAllDocsQuery(), Mockito.mock(IndexSearcher.class), new MatchAllDocsQuery()); PercolatorHighlightSubFetchPhase subFetchPhase = new PercolatorHighlightSubFetchPhase(Settings.EMPTY, emptyMap()); SearchContext searchContext = Mockito.mock(SearchContext.class); @@ -60,35 +59,50 @@ 
public void testHitsExecutionNeeded() { } public void testLocatePercolatorQuery() { - PercolateQuery percolateQuery = new PercolateQuery( - ctx -> null, new BytesArray("{}"), new MatchAllDocsQuery(), Mockito.mock(IndexSearcher.class), new MatchAllDocsQuery() - ); - assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(new MatchAllDocsQuery()), nullValue()); + PercolateQuery percolateQuery = new PercolateQuery("_name", ctx -> null, Collections.singletonList(new BytesArray("{}")), + new MatchAllDocsQuery(), Mockito.mock(IndexSearcher.class), new MatchAllDocsQuery()); + assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(new MatchAllDocsQuery()).size(), equalTo(0)); BooleanQuery.Builder bq = new BooleanQuery.Builder(); bq.add(new MatchAllDocsQuery(), BooleanClause.Occur.FILTER); - assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(bq.build()), nullValue()); + assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(bq.build()).size(), equalTo(0)); bq.add(percolateQuery, BooleanClause.Occur.FILTER); - assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(bq.build()), sameInstance(percolateQuery)); + assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(bq.build()).size(), equalTo(1)); + assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(bq.build()).get(0), sameInstance(percolateQuery)); ConstantScoreQuery constantScoreQuery = new ConstantScoreQuery(new MatchAllDocsQuery()); - assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(constantScoreQuery), nullValue()); + assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(constantScoreQuery).size(), equalTo(0)); constantScoreQuery = new ConstantScoreQuery(percolateQuery); - assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(constantScoreQuery), sameInstance(percolateQuery)); + assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(constantScoreQuery).size(), equalTo(1)); + 
assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(constantScoreQuery).get(0), sameInstance(percolateQuery)); BoostQuery boostQuery = new BoostQuery(new MatchAllDocsQuery(), 1f); - assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(boostQuery), nullValue()); + assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(boostQuery).size(), equalTo(0)); boostQuery = new BoostQuery(percolateQuery, 1f); - assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(boostQuery), sameInstance(percolateQuery)); + assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(boostQuery).size(), equalTo(1)); + assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(boostQuery).get(0), sameInstance(percolateQuery)); FunctionScoreQuery functionScoreQuery = new FunctionScoreQuery(new MatchAllDocsQuery(), new RandomScoreFunction(0, 0, null)); - assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(functionScoreQuery), nullValue()); + assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(functionScoreQuery).size(), equalTo(0)); functionScoreQuery = new FunctionScoreQuery(percolateQuery, new RandomScoreFunction(0, 0, null)); - assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(functionScoreQuery), sameInstance(percolateQuery)); + assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(functionScoreQuery).size(), equalTo(1)); + assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(functionScoreQuery).get(0), sameInstance(percolateQuery)); - DisjunctionMaxQuery disjunctionMaxQuery = new DisjunctionMaxQuery(Arrays.asList(new MatchAllDocsQuery()), 1f); - assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(disjunctionMaxQuery), nullValue()); + DisjunctionMaxQuery disjunctionMaxQuery = new DisjunctionMaxQuery(Collections.singleton(new MatchAllDocsQuery()), 1f); + assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(disjunctionMaxQuery).size(), 
equalTo(0)); disjunctionMaxQuery = new DisjunctionMaxQuery(Arrays.asList(percolateQuery, new MatchAllDocsQuery()), 1f); - assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(disjunctionMaxQuery), sameInstance(percolateQuery)); + assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(disjunctionMaxQuery).size(), equalTo(1)); + assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(disjunctionMaxQuery).get(0), sameInstance(percolateQuery)); + + PercolateQuery percolateQuery2 = new PercolateQuery("_name", ctx -> null, Collections.singletonList(new BytesArray("{}")), + new MatchAllDocsQuery(), Mockito.mock(IndexSearcher.class), new MatchAllDocsQuery()); + bq = new BooleanQuery.Builder(); + bq.add(new MatchAllDocsQuery(), BooleanClause.Occur.FILTER); + assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(bq.build()).size(), equalTo(0)); + bq.add(percolateQuery, BooleanClause.Occur.FILTER); + bq.add(percolateQuery2, BooleanClause.Occur.FILTER); + assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(bq.build()).size(), equalTo(2)); + assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(bq.build()).get(0), sameInstance(percolateQuery)); + assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(bq.build()).get(1), sameInstance(percolateQuery2)); } } diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhaseTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhaseTests.java new file mode 100644 index 0000000000000..d4b48174d76d1 --- /dev/null +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhaseTests.java @@ -0,0 +1,72 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.percolator; + +import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.util.FixedBitSet; +import org.elasticsearch.test.ESTestCase; + +import java.util.stream.IntStream; + +public class PercolatorMatchedSlotSubFetchPhaseTests extends ESTestCase { + + public void testConvertTopDocsToSlots() { + ScoreDoc[] scoreDocs = new ScoreDoc[randomInt(128)]; + for (int i = 0; i < scoreDocs.length; i++) { + scoreDocs[i] = new ScoreDoc(i, 1f); + } + + TopDocs topDocs = new TopDocs(scoreDocs.length, scoreDocs, 1f); + IntStream stream = PercolatorMatchedSlotSubFetchPhase.convertTopDocsToSlots(topDocs, null); + + int[] result = stream.toArray(); + assertEquals(scoreDocs.length, result.length); + for (int i = 0; i < scoreDocs.length; i++) { + assertEquals(scoreDocs[i].doc, result[i]); + } + } + + public void testConvertTopDocsToSlots_nestedDocs() { + ScoreDoc[] scoreDocs = new ScoreDoc[5]; + scoreDocs[0] = new ScoreDoc(2, 1f); + scoreDocs[1] = new ScoreDoc(5, 1f); + scoreDocs[2] = new ScoreDoc(8, 1f); + scoreDocs[3] = new ScoreDoc(11, 1f); + scoreDocs[4] = new ScoreDoc(14, 1f); + TopDocs topDocs = new TopDocs(scoreDocs.length, scoreDocs, 1f); + + FixedBitSet bitSet = new FixedBitSet(15); + bitSet.set(2); + bitSet.set(5); + bitSet.set(8); + bitSet.set(11); + bitSet.set(14); + + int[] rootDocsBySlot = 
PercolatorMatchedSlotSubFetchPhase.buildRootDocsSlots(bitSet); + int[] result = PercolatorMatchedSlotSubFetchPhase.convertTopDocsToSlots(topDocs, rootDocsBySlot).toArray(); + assertEquals(scoreDocs.length, result.length); + assertEquals(0, result[0]); + assertEquals(1, result[1]); + assertEquals(2, result[2]); + assertEquals(3, result[3]); + assertEquals(4, result[4]); + } + +} diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java index 17833864a42e3..c08069b1af775 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java @@ -23,6 +23,8 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.geo.GeoPoint; +import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; @@ -35,11 +37,17 @@ import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESIntegTestCase; +import java.util.Arrays; +import java.util.Collections; + import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.common.xcontent.XContentFactory.smileBuilder; import static org.elasticsearch.common.xcontent.XContentFactory.yamlBuilder; import static org.elasticsearch.index.query.QueryBuilders.boolQuery; import static org.elasticsearch.index.query.QueryBuilders.commonTermsQuery; +import static org.elasticsearch.index.query.QueryBuilders.geoBoundingBoxQuery; +import static org.elasticsearch.index.query.QueryBuilders.geoDistanceQuery; +import static 
org.elasticsearch.index.query.QueryBuilders.geoPolygonQuery; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.index.query.QueryBuilders.matchQuery; import static org.elasticsearch.index.query.QueryBuilders.multiMatchQuery; @@ -93,7 +101,9 @@ public void testPercolatorQuery() throws Exception { .get(); assertHitCount(response, 2); assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(1).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); source = jsonBuilder().startObject().field("field1", "value").field("field2", "value").endObject().bytes(); logger.info("percolating doc with 2 fields"); @@ -103,8 +113,27 @@ public void testPercolatorQuery() throws Exception { .get(); assertHitCount(response, 3); assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(1).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + assertThat(response.getHits().getAt(2).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); + + logger.info("percolating doc with 2 fields"); + response = client().prepareSearch() + .setQuery(new PercolateQueryBuilder("query", Arrays.asList( + jsonBuilder().startObject().field("field1", "value").endObject().bytes(), + jsonBuilder().startObject().field("field1", "value").field("field2", "value").endObject().bytes() + ), XContentType.JSON)) + .addSort("_uid", SortOrder.ASC) + .get(); + assertHitCount(response, 3); + assertThat(response.getHits().getAt(0).getId(), 
equalTo("1")); + assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValues(), equalTo(Arrays.asList(0, 1))); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(1).getFields().get("_percolator_document_slot").getValues(), equalTo(Arrays.asList(0, 1))); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + assertThat(response.getHits().getAt(2).getFields().get("_percolator_document_slot").getValues(), equalTo(Arrays.asList(1))); } public void testPercolatorRangeQueries() throws Exception { @@ -216,6 +245,40 @@ public void testPercolatorRangeQueries() throws Exception { assertThat(response.getHits().getAt(0).getId(), equalTo("10")); } + public void testPercolatorGeoQueries() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test") + .addMapping("type", "field1", "type=geo_point", "field2", "type=geo_shape", "query", "type=percolator") + ); + + client().prepareIndex("test", "type", "1") + .setSource(jsonBuilder().startObject().field("query", + geoDistanceQuery("field1").point(52.18, 4.38).distance(50, DistanceUnit.KILOMETERS)) + .endObject()).get(); + + client().prepareIndex("test", "type", "2") + .setSource(jsonBuilder().startObject().field("query", + geoBoundingBoxQuery("field1").setCorners(52.3, 4.4, 52.1, 4.6)) + .endObject()).get(); + + client().prepareIndex("test", "type", "3") + .setSource(jsonBuilder().startObject().field("query", + geoPolygonQuery("field1", Arrays.asList(new GeoPoint(52.1, 4.4), new GeoPoint(52.3, 4.5), new GeoPoint(52.1, 4.6)))) + .endObject()).get(); + refresh(); + + BytesReference source = jsonBuilder().startObject() + .startObject("field1").field("lat", 52.20).field("lon", 4.51).endObject() + .endObject().bytes(); + SearchResponse response = client().prepareSearch() + .setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)) + .addSort("_id", SortOrder.ASC) + .get(); + assertHitCount(response, 3); + 
assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + } + public void testPercolatorQueryExistingDocument() throws Exception { assertAcked(client().admin().indices().prepareCreate("test") .addMapping("type", "field1", "type=keyword", "field2", "type=keyword", "query", "type=percolator") @@ -405,6 +468,119 @@ public void testPercolatorQueryWithHighlighting() throws Exception { equalTo("The quick brown fox jumps over the lazy dog")); assertThat(searchResponse.getHits().getAt(4).getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the lazy dog")); + + BytesReference document1 = jsonBuilder().startObject() + .field("field1", "The quick brown fox jumps") + .endObject().bytes(); + BytesReference document2 = jsonBuilder().startObject() + .field("field1", "over the lazy dog") + .endObject().bytes(); + searchResponse = client().prepareSearch() + .setQuery(boolQuery() + .should(new PercolateQueryBuilder("query", document1, XContentType.JSON).setName("query1")) + .should(new PercolateQueryBuilder("query", document2, XContentType.JSON).setName("query2")) + ) + .highlighter(new HighlightBuilder().field("field1")) + .addSort("_uid", SortOrder.ASC) + .get(); + logger.info("searchResponse={}", searchResponse); + assertHitCount(searchResponse, 5); + + assertThat(searchResponse.getHits().getAt(0).getHighlightFields().get("query1_field1").fragments()[0].string(), + equalTo("The quick brown fox jumps")); + assertThat(searchResponse.getHits().getAt(1).getHighlightFields().get("query2_field1").fragments()[0].string(), + equalTo("over the lazy dog")); + assertThat(searchResponse.getHits().getAt(2).getHighlightFields().get("query1_field1").fragments()[0].string(), + equalTo("The quick brown fox jumps")); + 
assertThat(searchResponse.getHits().getAt(3).getHighlightFields().get("query2_field1").fragments()[0].string(), + equalTo("over the lazy dog")); + assertThat(searchResponse.getHits().getAt(4).getHighlightFields().get("query1_field1").fragments()[0].string(), + equalTo("The quick brown fox jumps")); + + searchResponse = client().prepareSearch() + .setQuery(new PercolateQueryBuilder("query", Arrays.asList( + jsonBuilder().startObject().field("field1", "dog").endObject().bytes(), + jsonBuilder().startObject().field("field1", "fox").endObject().bytes(), + jsonBuilder().startObject().field("field1", "jumps").endObject().bytes(), + jsonBuilder().startObject().field("field1", "brown fox").endObject().bytes() + ), XContentType.JSON)) + .highlighter(new HighlightBuilder().field("field1")) + .addSort("_uid", SortOrder.ASC) + .get(); + assertHitCount(searchResponse, 5); + assertThat(searchResponse.getHits().getAt(0).getFields().get("_percolator_document_slot").getValues(), + equalTo(Arrays.asList(1, 3))); + assertThat(searchResponse.getHits().getAt(0).getHighlightFields().get("1_field1").fragments()[0].string(), + equalTo("fox")); + assertThat(searchResponse.getHits().getAt(0).getHighlightFields().get("3_field1").fragments()[0].string(), + equalTo("brown fox")); + assertThat(searchResponse.getHits().getAt(1).getFields().get("_percolator_document_slot").getValues(), + equalTo(Collections.singletonList(0))); + assertThat(searchResponse.getHits().getAt(1).getHighlightFields().get("0_field1").fragments()[0].string(), + equalTo("dog")); + assertThat(searchResponse.getHits().getAt(2).getFields().get("_percolator_document_slot").getValues(), + equalTo(Collections.singletonList(2))); + assertThat(searchResponse.getHits().getAt(2).getHighlightFields().get("2_field1").fragments()[0].string(), + equalTo("jumps")); + assertThat(searchResponse.getHits().getAt(3).getFields().get("_percolator_document_slot").getValues(), + equalTo(Collections.singletonList(0))); + 
assertThat(searchResponse.getHits().getAt(3).getHighlightFields().get("0_field1").fragments()[0].string(), + equalTo("dog")); + assertThat(searchResponse.getHits().getAt(4).getFields().get("_percolator_document_slot").getValues(), + equalTo(Arrays.asList(1, 3))); + assertThat(searchResponse.getHits().getAt(4).getHighlightFields().get("1_field1").fragments()[0].string(), + equalTo("fox")); + assertThat(searchResponse.getHits().getAt(4).getHighlightFields().get("3_field1").fragments()[0].string(), + equalTo("brown fox")); + + searchResponse = client().prepareSearch() + .setQuery(boolQuery() + .should(new PercolateQueryBuilder("query", Arrays.asList( + jsonBuilder().startObject().field("field1", "dog").endObject().bytes(), + jsonBuilder().startObject().field("field1", "fox").endObject().bytes() + ), XContentType.JSON).setName("query1")) + .should(new PercolateQueryBuilder("query", Arrays.asList( + jsonBuilder().startObject().field("field1", "jumps").endObject().bytes(), + jsonBuilder().startObject().field("field1", "brown fox").endObject().bytes() + ), XContentType.JSON).setName("query2")) + ) + .highlighter(new HighlightBuilder().field("field1")) + .addSort("_uid", SortOrder.ASC) + .get(); + logger.info("searchResponse={}", searchResponse); + assertHitCount(searchResponse, 5); + assertThat(searchResponse.getHits().getAt(0).getFields().get("_percolator_document_slot_query1").getValues(), + equalTo(Collections.singletonList(1))); + assertThat(searchResponse.getHits().getAt(0).getFields().get("_percolator_document_slot_query2").getValues(), + equalTo(Collections.singletonList(1))); + assertThat(searchResponse.getHits().getAt(0).getHighlightFields().get("query1_1_field1").fragments()[0].string(), + equalTo("fox")); + assertThat(searchResponse.getHits().getAt(0).getHighlightFields().get("query2_1_field1").fragments()[0].string(), + equalTo("brown fox")); + + assertThat(searchResponse.getHits().getAt(1).getFields().get("_percolator_document_slot_query1").getValues(), + 
equalTo(Collections.singletonList(0))); + assertThat(searchResponse.getHits().getAt(1).getHighlightFields().get("query1_0_field1").fragments()[0].string(), + equalTo("dog")); + + assertThat(searchResponse.getHits().getAt(2).getFields().get("_percolator_document_slot_query2").getValues(), + equalTo(Collections.singletonList(0))); + assertThat(searchResponse.getHits().getAt(2).getHighlightFields().get("query2_0_field1").fragments()[0].string(), + equalTo("jumps")); + + assertThat(searchResponse.getHits().getAt(3).getFields().get("_percolator_document_slot_query1").getValues(), + equalTo(Collections.singletonList(0))); + assertThat(searchResponse.getHits().getAt(3).getHighlightFields().get("query1_0_field1").fragments()[0].string(), + equalTo("dog")); + + assertThat(searchResponse.getHits().getAt(4).getFields().get("_percolator_document_slot_query1").getValues(), + equalTo(Collections.singletonList(1))); + assertThat(searchResponse.getHits().getAt(4).getFields().get("_percolator_document_slot_query2").getValues(), + equalTo(Collections.singletonList(1))); + assertThat(searchResponse.getHits().getAt(4).getHighlightFields().get("query1_1_field1").fragments()[0].string(), + equalTo("fox")); + assertThat(searchResponse.getHits().getAt(4).getHighlightFields().get("query2_1_field1").fragments()[0].string(), + equalTo("brown fox")); } public void testTakePositionOffsetGapIntoAccount() throws Exception { @@ -422,7 +598,7 @@ public void testTakePositionOffsetGapIntoAccount() throws Exception { client().admin().indices().prepareRefresh().get(); SearchResponse response = client().prepareSearch().setQuery( - new PercolateQueryBuilder("query", null, new BytesArray("{\"field\" : [\"brown\", \"fox\"]}"), XContentType.JSON) + new PercolateQueryBuilder("query", new BytesArray("{\"field\" : [\"brown\", \"fox\"]}"), XContentType.JSON) ).get(); assertHitCount(response, 1); assertThat(response.getHits().getAt(0).getId(), equalTo("2")); @@ -573,6 +749,29 @@ public void 
testPercolateQueryWithNestedDocuments() throws Exception { .addSort("_doc", SortOrder.ASC) .get(); assertHitCount(response, 0); + + response = client().prepareSearch() + .setQuery(new PercolateQueryBuilder("query", Arrays.asList( + XContentFactory.jsonBuilder() + .startObject().field("companyname", "stark") + .startArray("employee") + .startObject().field("name", "virginia potts").endObject() + .startObject().field("name", "tony stark").endObject() + .endArray() + .endObject().bytes(), + XContentFactory.jsonBuilder() + .startObject().field("companyname", "stark") + .startArray("employee") + .startObject().field("name", "peter parker").endObject() + .startObject().field("name", "virginia potts").endObject() + .endArray() + .endObject().bytes() + ), XContentType.JSON)) + .addSort("_doc", SortOrder.ASC) + .get(); + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("q1")); + assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValues(), equalTo(Arrays.asList(0, 1))); } public void testPercolatorQueryViaMultiSearch() throws Exception { diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchTests.java index 020280670c45f..15a33f2090b9f 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchTests.java @@ -21,6 +21,7 @@ import org.apache.lucene.search.join.ScoreMode; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -46,6 +47,7 @@ import 
java.util.function.Function; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.query.QueryBuilders.matchQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; @@ -99,7 +101,7 @@ public void testPercolateQueryWithNestedDocuments_doNotLeakBitsetCacheEntries() ); client().prepareIndex("test", "employee", "q1").setSource(jsonBuilder().startObject() .field("query", QueryBuilders.nestedQuery("employee", - QueryBuilders.matchQuery("employee.name", "virginia potts").operator(Operator.AND), ScoreMode.Avg) + matchQuery("employee.name", "virginia potts").operator(Operator.AND), ScoreMode.Avg) ).endObject()) .get(); client().admin().indices().prepareRefresh().get(); @@ -202,4 +204,37 @@ public void testPercolateQueryWithNestedDocuments_doLeakFieldDataCacheEntries() assertEquals("The percolator works with in-memory index and therefor shouldn't use field-data cache", 0L, fieldDataSize); } + public void testMapUnmappedFieldAsText() throws IOException { + Settings.Builder settings = Settings.builder() + .put("index.percolator.map_unmapped_fields_as_text", true); + createIndex("test", settings.build(), "query", "query", "type=percolator"); + client().prepareIndex("test", "query", "1") + .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "value")).endObject()).get(); + client().admin().indices().prepareRefresh().get(); + + SearchResponse response = client().prepareSearch("test") + .setQuery(new PercolateQueryBuilder("query", jsonBuilder().startObject().field("field1", "value").endObject().bytes(), + XContentType.JSON)) + .get(); + assertHitCount(response, 1); + assertSearchHits(response, "1"); + } + + public void testMapUnmappedFieldAsString() throws IOException { + Settings.Builder settings = Settings.builder() + .put("index.percolator.map_unmapped_fields_as_string", 
true); + createIndex("test", settings.build(), "query", "query", "type=percolator"); + client().prepareIndex("test", "query", "1") + .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "value")).endObject()).get(); + client().admin().indices().prepareRefresh().get(); + + SearchResponse response = client().prepareSearch("test") + .setQuery(new PercolateQueryBuilder("query", jsonBuilder().startObject().field("field1", "value").endObject().bytes(), + XContentType.JSON)) + .get(); + assertHitCount(response, 1); + assertSearchHits(response, "1"); + assertSettingDeprecationsAndWarnings(new Setting[]{PercolatorFieldMapper.INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING}); + } + } diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java index e1e28b2bbee3b..ff70c6644601b 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java @@ -23,6 +23,7 @@ import org.apache.lucene.document.HalfFloatPoint; import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.document.IntPoint; +import org.apache.lucene.document.LatLonPoint; import org.apache.lucene.document.LongPoint; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.Term; @@ -588,6 +589,21 @@ public void testSelectBestExtraction() { queryTerms2 = terms(new int[]{2, 3, 4}, "1", "456"); result = selectBestExtraction(Collections.emptyMap(), queryTerms1, queryTerms2); assertSame("Ignoring ranges, so then prefer queryTerms1, because it has the longest shortest term", queryTerms1, result); + + queryTerms1 = terms(new int[]{}); + queryTerms2 = terms(new int[]{}); + result = selectBestExtraction(Collections.emptyMap(), queryTerms1, queryTerms2); + assertSame("In case query extractions are empty", 
queryTerms2, result); + + queryTerms1 = terms(new int[]{1}); + queryTerms2 = terms(new int[]{}); + result = selectBestExtraction(Collections.emptyMap(), queryTerms1, queryTerms2); + assertSame("In case query a single extraction is empty", queryTerms1, result); + + queryTerms1 = terms(new int[]{}); + queryTerms2 = terms(new int[]{1}); + result = selectBestExtraction(Collections.emptyMap(), queryTerms1, queryTerms2); + assertSame("In case query a single extraction is empty", queryTerms2, result); } public void testSelectBestExtraction_boostFields() { @@ -743,6 +759,22 @@ public void testPointRangeQuery() { assertArrayEquals(ranges.get(0).range.upperPoint, InetAddressPoint.encode(InetAddresses.forString("192.168.1.255"))); } + public void testTooManyPointDimensions() { + // For now no extraction support for geo queries: + Query query1 = LatLonPoint.newBoxQuery("_field", 0, 1, 0, 1); + expectThrows(UnsupportedQueryException.class, () -> analyze(query1, Collections.emptyMap())); + + Query query2 = LongPoint.newRangeQuery("_field", new long[]{0, 0, 0}, new long[]{1, 1, 1}); + expectThrows(UnsupportedQueryException.class, () -> analyze(query2, Collections.emptyMap())); + } + + public void testPointRangeQuery_lowerUpperReversed() { + Query query = IntPoint.newRangeQuery("_field", 20, 10); + Result result = analyze(query, Collections.emptyMap()); + assertTrue(result.verified); + assertThat(result.extractions.size(), equalTo(0)); + } + public void testIndexOrDocValuesQuery() { Query query = new IndexOrDocValuesQuery(IntPoint.newRangeQuery("_field", 10, 20), SortedNumericDocValuesField.newSlowRangeQuery("_field", 10, 20)); diff --git a/modules/reindex/src/main/plugin-metadata/plugin-security.policy b/modules/reindex/src/main/plugin-metadata/plugin-security.policy index 39c1d77277169..70fb51b845ce1 100644 --- a/modules/reindex/src/main/plugin-metadata/plugin-security.policy +++ b/modules/reindex/src/main/plugin-metadata/plugin-security.policy @@ -22,7 +22,7 @@ grant { 
permission java.net.SocketPermission "*", "connect"; }; -grant codeBase "${codebase.elasticsearch-rest-client-7.0.0-alpha1-SNAPSHOT.jar}" { +grant codeBase "${codebase.elasticsearch-rest-client}" { // rest client uses system properties which gets the default proxy permission java.net.NetPermission "getProxySelector"; }; diff --git a/plugins/analysis-icu/build.gradle b/plugins/analysis-icu/build.gradle index 2a8905e080f0f..123db9fc4a575 100644 --- a/plugins/analysis-icu/build.gradle +++ b/plugins/analysis-icu/build.gradle @@ -22,6 +22,12 @@ esplugin { classname 'org.elasticsearch.plugin.analysis.icu.AnalysisICUPlugin' } +forbiddenApis { + signatures += [ + "com.ibm.icu.text.Collator#getInstance() @ Don't use default locale, use getInstance(ULocale) instead" + ] +} + dependencies { compile "org.apache.lucene:lucene-analyzers-icu:${versions.lucene}" compile 'com.ibm.icu:icu4j:59.1' diff --git a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/CollationFieldTypeTests.java b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/CollationFieldTypeTests.java index 0117caf1147b9..71d8f25bf9f3b 100644 --- a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/CollationFieldTypeTests.java +++ b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/CollationFieldTypeTests.java @@ -135,11 +135,11 @@ public void testRangeQuery() { TermRangeQuery expected = new TermRangeQuery("field", new BytesRef(aKey.bytes, 0, aKey.size), new BytesRef(bKey.bytes, 0, bKey.size), false, false); - assertEquals(expected, ft.rangeQuery("a", "b", false, false, null)); + assertEquals(expected, ft.rangeQuery("a", "b", false, false, null, null, null, null)); ft.setIndexOptions(IndexOptions.NONE); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> ft.rangeQuery("a", "b", false, false, null)); + () -> ft.rangeQuery("a", "b", false, false, null, null, null, null)); assertEquals("Cannot search on field [field] since it is 
not indexed.", e.getMessage()); } } diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index 815e50f61c12c..f6941a9260e4b 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -23,7 +23,7 @@ esplugin { } versions << [ - 'aws': '1.10.69' + 'aws': '1.11.187' ] dependencies { @@ -55,6 +55,26 @@ test { thirdPartyAudit.excludes = [ // classes are missing + 'com.amazonaws.jmespath.JmesPathEvaluationVisitor', + 'com.amazonaws.jmespath.JmesPathExpression', + 'com.amazonaws.jmespath.JmesPathField', + 'com.amazonaws.jmespath.JmesPathFlatten', + 'com.amazonaws.jmespath.JmesPathIdentity', + 'com.amazonaws.jmespath.JmesPathLengthFunction', + 'com.amazonaws.jmespath.JmesPathLiteral', + 'com.amazonaws.jmespath.JmesPathProjection', + 'com.amazonaws.jmespath.JmesPathSubExpression', + 'com.amazonaws.jmespath.ObjectMapperSingleton', + 'com.amazonaws.jmespath.OpGreaterThan', + 'software.amazon.ion.IonReader', + 'software.amazon.ion.IonSystem', + 'software.amazon.ion.IonType', + 'software.amazon.ion.IonWriter', + 'software.amazon.ion.Timestamp', + 'software.amazon.ion.system.IonBinaryWriterBuilder', + 'software.amazon.ion.system.IonSystemBuilder', + 'software.amazon.ion.system.IonTextWriterBuilder', + 'software.amazon.ion.system.IonWriterBuilder', 'javax.servlet.ServletContextEvent', 'javax.servlet.ServletContextListener', 'org.apache.avalon.framework.logger.Logger', diff --git a/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.10.69.jar.sha1 b/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.10.69.jar.sha1 deleted file mode 100644 index 2971a33d7d91b..0000000000000 --- a/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.10.69.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a1f02d5f26ba1d8c37e2bf9c847db3c6729dda00 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.11.187.jar.sha1 b/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.11.187.jar.sha1 new file mode 100644 index 
0000000000000..a5293a9bf6580 --- /dev/null +++ b/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.11.187.jar.sha1 @@ -0,0 +1 @@ +6f47fcd3c2917bef69dc36aba203c5ea4af9bf24 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.10.69.jar.sha1 b/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.10.69.jar.sha1 deleted file mode 100644 index a1a493d3b8f84..0000000000000 --- a/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.10.69.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -afbff1ece8365859eb4cfe0d3ba543d68b154d26 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.11.187.jar.sha1 b/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.11.187.jar.sha1 new file mode 100644 index 0000000000000..4602436e08182 --- /dev/null +++ b/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.11.187.jar.sha1 @@ -0,0 +1 @@ +f3e5a8601f3105624674b1a12ca34f453a4b5895 \ No newline at end of file diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java index 050a25bb18dc3..34ad449d06e8d 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java @@ -32,14 +32,27 @@ import com.amazonaws.services.ec2.model.AllocateHostsRequest; import com.amazonaws.services.ec2.model.AllocateHostsResult; import com.amazonaws.services.ec2.model.AssignPrivateIpAddressesRequest; +import com.amazonaws.services.ec2.model.AssignPrivateIpAddressesResult; +import com.amazonaws.services.ec2.model.AssignIpv6AddressesRequest; +import com.amazonaws.services.ec2.model.AssignIpv6AddressesResult; import com.amazonaws.services.ec2.model.AssociateAddressRequest; import com.amazonaws.services.ec2.model.AssociateAddressResult; +import com.amazonaws.services.ec2.model.AssociateVpcCidrBlockRequest; +import 
com.amazonaws.services.ec2.model.AssociateVpcCidrBlockResult; +import com.amazonaws.services.ec2.model.AssociateSubnetCidrBlockRequest; +import com.amazonaws.services.ec2.model.AssociateSubnetCidrBlockResult; +import com.amazonaws.services.ec2.model.AssociateIamInstanceProfileRequest; +import com.amazonaws.services.ec2.model.AssociateIamInstanceProfileResult; +import com.amazonaws.services.ec2.model.AcceptReservedInstancesExchangeQuoteRequest; +import com.amazonaws.services.ec2.model.AcceptReservedInstancesExchangeQuoteResult; import com.amazonaws.services.ec2.model.AssociateDhcpOptionsRequest; +import com.amazonaws.services.ec2.model.AssociateDhcpOptionsResult; import com.amazonaws.services.ec2.model.AssociateRouteTableRequest; import com.amazonaws.services.ec2.model.AssociateRouteTableResult; import com.amazonaws.services.ec2.model.AttachClassicLinkVpcRequest; import com.amazonaws.services.ec2.model.AttachClassicLinkVpcResult; import com.amazonaws.services.ec2.model.AttachInternetGatewayRequest; +import com.amazonaws.services.ec2.model.AttachInternetGatewayResult; import com.amazonaws.services.ec2.model.AttachNetworkInterfaceRequest; import com.amazonaws.services.ec2.model.AttachNetworkInterfaceResult; import com.amazonaws.services.ec2.model.AttachVolumeRequest; @@ -47,13 +60,17 @@ import com.amazonaws.services.ec2.model.AttachVpnGatewayRequest; import com.amazonaws.services.ec2.model.AttachVpnGatewayResult; import com.amazonaws.services.ec2.model.AuthorizeSecurityGroupEgressRequest; +import com.amazonaws.services.ec2.model.AuthorizeSecurityGroupEgressResult; import com.amazonaws.services.ec2.model.AuthorizeSecurityGroupIngressRequest; +import com.amazonaws.services.ec2.model.AuthorizeSecurityGroupIngressResult; import com.amazonaws.services.ec2.model.BundleInstanceRequest; import com.amazonaws.services.ec2.model.BundleInstanceResult; import com.amazonaws.services.ec2.model.CancelBundleTaskRequest; import com.amazonaws.services.ec2.model.CancelBundleTaskResult; 
import com.amazonaws.services.ec2.model.CancelConversionTaskRequest; +import com.amazonaws.services.ec2.model.CancelConversionTaskResult; import com.amazonaws.services.ec2.model.CancelExportTaskRequest; +import com.amazonaws.services.ec2.model.CancelExportTaskResult; import com.amazonaws.services.ec2.model.CancelImportTaskRequest; import com.amazonaws.services.ec2.model.CancelImportTaskResult; import com.amazonaws.services.ec2.model.CancelReservedInstancesListingRequest; @@ -69,9 +86,17 @@ import com.amazonaws.services.ec2.model.CopySnapshotRequest; import com.amazonaws.services.ec2.model.CopySnapshotResult; import com.amazonaws.services.ec2.model.CreateCustomerGatewayRequest; +import com.amazonaws.services.ec2.model.CreateDefaultVpcResult; +import com.amazonaws.services.ec2.model.CreateDefaultVpcRequest; import com.amazonaws.services.ec2.model.CreateCustomerGatewayResult; import com.amazonaws.services.ec2.model.CreateDhcpOptionsRequest; import com.amazonaws.services.ec2.model.CreateDhcpOptionsResult; +import com.amazonaws.services.ec2.model.CreateEgressOnlyInternetGatewayRequest; +import com.amazonaws.services.ec2.model.CreateEgressOnlyInternetGatewayResult; +import com.amazonaws.services.ec2.model.CreateFpgaImageRequest; +import com.amazonaws.services.ec2.model.CreateFpgaImageResult; +import com.amazonaws.services.ec2.model.CreateNetworkInterfacePermissionRequest; +import com.amazonaws.services.ec2.model.CreateNetworkInterfacePermissionResult; import com.amazonaws.services.ec2.model.CreateFlowLogsRequest; import com.amazonaws.services.ec2.model.CreateFlowLogsResult; import com.amazonaws.services.ec2.model.CreateImageRequest; @@ -85,11 +110,13 @@ import com.amazonaws.services.ec2.model.CreateNatGatewayRequest; import com.amazonaws.services.ec2.model.CreateNatGatewayResult; import com.amazonaws.services.ec2.model.CreateNetworkAclEntryRequest; +import com.amazonaws.services.ec2.model.CreateNetworkAclEntryResult; import 
com.amazonaws.services.ec2.model.CreateNetworkAclRequest; import com.amazonaws.services.ec2.model.CreateNetworkAclResult; import com.amazonaws.services.ec2.model.CreateNetworkInterfaceRequest; import com.amazonaws.services.ec2.model.CreateNetworkInterfaceResult; import com.amazonaws.services.ec2.model.CreatePlacementGroupRequest; +import com.amazonaws.services.ec2.model.CreatePlacementGroupResult; import com.amazonaws.services.ec2.model.CreateReservedInstancesListingRequest; import com.amazonaws.services.ec2.model.CreateReservedInstancesListingResult; import com.amazonaws.services.ec2.model.CreateRouteRequest; @@ -105,6 +132,7 @@ import com.amazonaws.services.ec2.model.CreateSubnetRequest; import com.amazonaws.services.ec2.model.CreateSubnetResult; import com.amazonaws.services.ec2.model.CreateTagsRequest; +import com.amazonaws.services.ec2.model.CreateTagsResult; import com.amazonaws.services.ec2.model.CreateVolumeRequest; import com.amazonaws.services.ec2.model.CreateVolumeResult; import com.amazonaws.services.ec2.model.CreateVpcEndpointRequest; @@ -116,37 +144,63 @@ import com.amazonaws.services.ec2.model.CreateVpnConnectionRequest; import com.amazonaws.services.ec2.model.CreateVpnConnectionResult; import com.amazonaws.services.ec2.model.CreateVpnConnectionRouteRequest; +import com.amazonaws.services.ec2.model.CreateVpnConnectionRouteResult; import com.amazonaws.services.ec2.model.CreateVpnGatewayRequest; import com.amazonaws.services.ec2.model.CreateVpnGatewayResult; import com.amazonaws.services.ec2.model.DeleteCustomerGatewayRequest; +import com.amazonaws.services.ec2.model.DeleteCustomerGatewayResult; import com.amazonaws.services.ec2.model.DeleteDhcpOptionsRequest; +import com.amazonaws.services.ec2.model.DeleteDhcpOptionsResult; +import com.amazonaws.services.ec2.model.DeleteEgressOnlyInternetGatewayRequest; +import com.amazonaws.services.ec2.model.DeleteEgressOnlyInternetGatewayResult; +import 
com.amazonaws.services.ec2.model.DeleteNetworkInterfacePermissionRequest; +import com.amazonaws.services.ec2.model.DeleteNetworkInterfacePermissionResult; import com.amazonaws.services.ec2.model.DeleteFlowLogsRequest; import com.amazonaws.services.ec2.model.DeleteFlowLogsResult; import com.amazonaws.services.ec2.model.DeleteInternetGatewayRequest; +import com.amazonaws.services.ec2.model.DeleteInternetGatewayResult; import com.amazonaws.services.ec2.model.DeleteKeyPairRequest; +import com.amazonaws.services.ec2.model.DeleteKeyPairResult; import com.amazonaws.services.ec2.model.DeleteNatGatewayRequest; import com.amazonaws.services.ec2.model.DeleteNatGatewayResult; import com.amazonaws.services.ec2.model.DeleteNetworkAclEntryRequest; +import com.amazonaws.services.ec2.model.DeleteNetworkAclEntryResult; import com.amazonaws.services.ec2.model.DeleteNetworkAclRequest; +import com.amazonaws.services.ec2.model.DeleteNetworkAclResult; import com.amazonaws.services.ec2.model.DeleteNetworkInterfaceRequest; +import com.amazonaws.services.ec2.model.DeleteNetworkInterfaceResult; import com.amazonaws.services.ec2.model.DeletePlacementGroupRequest; +import com.amazonaws.services.ec2.model.DeletePlacementGroupResult; import com.amazonaws.services.ec2.model.DeleteRouteRequest; +import com.amazonaws.services.ec2.model.DeleteRouteResult; import com.amazonaws.services.ec2.model.DeleteRouteTableRequest; +import com.amazonaws.services.ec2.model.DeleteRouteTableResult; import com.amazonaws.services.ec2.model.DeleteSecurityGroupRequest; +import com.amazonaws.services.ec2.model.DeleteSecurityGroupResult; import com.amazonaws.services.ec2.model.DeleteSnapshotRequest; +import com.amazonaws.services.ec2.model.DeleteSnapshotResult; import com.amazonaws.services.ec2.model.DeleteSpotDatafeedSubscriptionRequest; +import com.amazonaws.services.ec2.model.DeleteSpotDatafeedSubscriptionResult; import com.amazonaws.services.ec2.model.DeleteSubnetRequest; +import 
com.amazonaws.services.ec2.model.DeleteSubnetResult; import com.amazonaws.services.ec2.model.DeleteTagsRequest; +import com.amazonaws.services.ec2.model.DeleteTagsResult; import com.amazonaws.services.ec2.model.DeleteVolumeRequest; +import com.amazonaws.services.ec2.model.DeleteVolumeResult; import com.amazonaws.services.ec2.model.DeleteVpcEndpointsRequest; import com.amazonaws.services.ec2.model.DeleteVpcEndpointsResult; import com.amazonaws.services.ec2.model.DeleteVpcPeeringConnectionRequest; import com.amazonaws.services.ec2.model.DeleteVpcPeeringConnectionResult; import com.amazonaws.services.ec2.model.DeleteVpcRequest; +import com.amazonaws.services.ec2.model.DeleteVpcResult; import com.amazonaws.services.ec2.model.DeleteVpnConnectionRequest; +import com.amazonaws.services.ec2.model.DeleteVpnConnectionResult; import com.amazonaws.services.ec2.model.DeleteVpnConnectionRouteRequest; +import com.amazonaws.services.ec2.model.DeleteVpnConnectionRouteResult; import com.amazonaws.services.ec2.model.DeleteVpnGatewayRequest; +import com.amazonaws.services.ec2.model.DeleteVpnGatewayResult; import com.amazonaws.services.ec2.model.DeregisterImageRequest; +import com.amazonaws.services.ec2.model.DeregisterImageResult; import com.amazonaws.services.ec2.model.DescribeAccountAttributesRequest; import com.amazonaws.services.ec2.model.DescribeAccountAttributesResult; import com.amazonaws.services.ec2.model.DescribeAddressesRequest; @@ -163,12 +217,26 @@ import com.amazonaws.services.ec2.model.DescribeCustomerGatewaysResult; import com.amazonaws.services.ec2.model.DescribeDhcpOptionsRequest; import com.amazonaws.services.ec2.model.DescribeDhcpOptionsResult; +import com.amazonaws.services.ec2.model.DescribeEgressOnlyInternetGatewaysRequest; +import com.amazonaws.services.ec2.model.DescribeEgressOnlyInternetGatewaysResult; import com.amazonaws.services.ec2.model.DescribeExportTasksRequest; import com.amazonaws.services.ec2.model.DescribeExportTasksResult; +import 
com.amazonaws.services.ec2.model.DescribeElasticGpusRequest; +import com.amazonaws.services.ec2.model.DescribeElasticGpusResult; +import com.amazonaws.services.ec2.model.DescribeFpgaImagesRequest; +import com.amazonaws.services.ec2.model.DescribeFpgaImagesResult; +import com.amazonaws.services.ec2.model.DescribeHostReservationOfferingsRequest; +import com.amazonaws.services.ec2.model.DescribeHostReservationOfferingsResult; +import com.amazonaws.services.ec2.model.DescribeHostReservationsRequest; +import com.amazonaws.services.ec2.model.DescribeHostReservationsResult; +import com.amazonaws.services.ec2.model.DescribeIdentityIdFormatRequest; +import com.amazonaws.services.ec2.model.DescribeIdentityIdFormatResult; import com.amazonaws.services.ec2.model.DescribeFlowLogsRequest; import com.amazonaws.services.ec2.model.DescribeFlowLogsResult; import com.amazonaws.services.ec2.model.DescribeHostsRequest; import com.amazonaws.services.ec2.model.DescribeHostsResult; +import com.amazonaws.services.ec2.model.DescribeIamInstanceProfileAssociationsRequest; +import com.amazonaws.services.ec2.model.DescribeIamInstanceProfileAssociationsResult; import com.amazonaws.services.ec2.model.DescribeIdFormatRequest; import com.amazonaws.services.ec2.model.DescribeIdFormatResult; import com.amazonaws.services.ec2.model.DescribeImageAttributeRequest; @@ -199,6 +267,8 @@ import com.amazonaws.services.ec2.model.DescribeNetworkInterfaceAttributeResult; import com.amazonaws.services.ec2.model.DescribeNetworkInterfacesRequest; import com.amazonaws.services.ec2.model.DescribeNetworkInterfacesResult; +import com.amazonaws.services.ec2.model.DescribeNetworkInterfacePermissionsRequest; +import com.amazonaws.services.ec2.model.DescribeNetworkInterfacePermissionsResult; import com.amazonaws.services.ec2.model.DescribePlacementGroupsRequest; import com.amazonaws.services.ec2.model.DescribePlacementGroupsResult; import com.amazonaws.services.ec2.model.DescribePrefixListsRequest; @@ -221,6 +291,10 @@ 
import com.amazonaws.services.ec2.model.DescribeScheduledInstancesResult; import com.amazonaws.services.ec2.model.DescribeSecurityGroupsRequest; import com.amazonaws.services.ec2.model.DescribeSecurityGroupsResult; +import com.amazonaws.services.ec2.model.DescribeStaleSecurityGroupsRequest; +import com.amazonaws.services.ec2.model.DescribeStaleSecurityGroupsResult; +import com.amazonaws.services.ec2.model.DescribeSecurityGroupReferencesRequest; +import com.amazonaws.services.ec2.model.DescribeSecurityGroupReferencesResult; import com.amazonaws.services.ec2.model.DescribeSnapshotAttributeRequest; import com.amazonaws.services.ec2.model.DescribeSnapshotAttributeResult; import com.amazonaws.services.ec2.model.DescribeSnapshotsRequest; @@ -245,6 +319,8 @@ import com.amazonaws.services.ec2.model.DescribeVolumeAttributeResult; import com.amazonaws.services.ec2.model.DescribeVolumeStatusRequest; import com.amazonaws.services.ec2.model.DescribeVolumeStatusResult; +import com.amazonaws.services.ec2.model.DescribeVolumesModificationsRequest; +import com.amazonaws.services.ec2.model.DescribeVolumesModificationsResult; import com.amazonaws.services.ec2.model.DescribeVolumesRequest; import com.amazonaws.services.ec2.model.DescribeVolumesResult; import com.amazonaws.services.ec2.model.DescribeVpcAttributeRequest; @@ -268,21 +344,35 @@ import com.amazonaws.services.ec2.model.DetachClassicLinkVpcRequest; import com.amazonaws.services.ec2.model.DetachClassicLinkVpcResult; import com.amazonaws.services.ec2.model.DetachInternetGatewayRequest; +import com.amazonaws.services.ec2.model.DetachInternetGatewayResult; import com.amazonaws.services.ec2.model.DetachNetworkInterfaceRequest; +import com.amazonaws.services.ec2.model.DetachNetworkInterfaceResult; import com.amazonaws.services.ec2.model.DetachVolumeRequest; import com.amazonaws.services.ec2.model.DetachVolumeResult; import com.amazonaws.services.ec2.model.DetachVpnGatewayRequest; +import 
com.amazonaws.services.ec2.model.DetachVpnGatewayResult; import com.amazonaws.services.ec2.model.DisableVgwRoutePropagationRequest; +import com.amazonaws.services.ec2.model.DisableVgwRoutePropagationResult; import com.amazonaws.services.ec2.model.DisableVpcClassicLinkDnsSupportRequest; import com.amazonaws.services.ec2.model.DisableVpcClassicLinkDnsSupportResult; import com.amazonaws.services.ec2.model.DisableVpcClassicLinkRequest; import com.amazonaws.services.ec2.model.DisableVpcClassicLinkResult; import com.amazonaws.services.ec2.model.DisassociateAddressRequest; +import com.amazonaws.services.ec2.model.DisassociateAddressResult; import com.amazonaws.services.ec2.model.DisassociateRouteTableRequest; +import com.amazonaws.services.ec2.model.DisassociateRouteTableResult; +import com.amazonaws.services.ec2.model.DisassociateIamInstanceProfileRequest; +import com.amazonaws.services.ec2.model.DisassociateIamInstanceProfileResult; +import com.amazonaws.services.ec2.model.DisassociateVpcCidrBlockRequest; +import com.amazonaws.services.ec2.model.DisassociateVpcCidrBlockResult; +import com.amazonaws.services.ec2.model.DisassociateSubnetCidrBlockRequest; +import com.amazonaws.services.ec2.model.DisassociateSubnetCidrBlockResult; import com.amazonaws.services.ec2.model.DryRunResult; import com.amazonaws.services.ec2.model.DryRunSupportedRequest; import com.amazonaws.services.ec2.model.EnableVgwRoutePropagationRequest; +import com.amazonaws.services.ec2.model.EnableVgwRoutePropagationResult; import com.amazonaws.services.ec2.model.EnableVolumeIORequest; +import com.amazonaws.services.ec2.model.EnableVolumeIOResult; import com.amazonaws.services.ec2.model.EnableVpcClassicLinkDnsSupportRequest; import com.amazonaws.services.ec2.model.EnableVpcClassicLinkDnsSupportResult; import com.amazonaws.services.ec2.model.EnableVpcClassicLinkRequest; @@ -290,8 +380,14 @@ import com.amazonaws.services.ec2.model.Filter; import com.amazonaws.services.ec2.model.GetConsoleOutputRequest; 
import com.amazonaws.services.ec2.model.GetConsoleOutputResult; +import com.amazonaws.services.ec2.model.GetConsoleScreenshotRequest; +import com.amazonaws.services.ec2.model.GetConsoleScreenshotResult; +import com.amazonaws.services.ec2.model.GetHostReservationPurchasePreviewRequest; +import com.amazonaws.services.ec2.model.GetHostReservationPurchasePreviewResult; import com.amazonaws.services.ec2.model.GetPasswordDataRequest; import com.amazonaws.services.ec2.model.GetPasswordDataResult; +import com.amazonaws.services.ec2.model.GetReservedInstancesExchangeQuoteRequest; +import com.amazonaws.services.ec2.model.GetReservedInstancesExchangeQuoteResult; import com.amazonaws.services.ec2.model.ImportImageRequest; import com.amazonaws.services.ec2.model.ImportImageResult; import com.amazonaws.services.ec2.model.ImportInstanceRequest; @@ -308,19 +404,31 @@ import com.amazonaws.services.ec2.model.ModifyHostsRequest; import com.amazonaws.services.ec2.model.ModifyHostsResult; import com.amazonaws.services.ec2.model.ModifyIdFormatRequest; +import com.amazonaws.services.ec2.model.ModifyIdFormatResult; import com.amazonaws.services.ec2.model.ModifyImageAttributeRequest; +import com.amazonaws.services.ec2.model.ModifyImageAttributeResult; import com.amazonaws.services.ec2.model.ModifyInstanceAttributeRequest; +import com.amazonaws.services.ec2.model.ModifyInstanceAttributeResult; import com.amazonaws.services.ec2.model.ModifyInstancePlacementRequest; import com.amazonaws.services.ec2.model.ModifyInstancePlacementResult; +import com.amazonaws.services.ec2.model.ModifyIdentityIdFormatRequest; +import com.amazonaws.services.ec2.model.ModifyIdentityIdFormatResult; import com.amazonaws.services.ec2.model.ModifyNetworkInterfaceAttributeRequest; +import com.amazonaws.services.ec2.model.ModifyNetworkInterfaceAttributeResult; import com.amazonaws.services.ec2.model.ModifyReservedInstancesRequest; import com.amazonaws.services.ec2.model.ModifyReservedInstancesResult; import 
com.amazonaws.services.ec2.model.ModifySnapshotAttributeRequest; +import com.amazonaws.services.ec2.model.ModifySnapshotAttributeResult; import com.amazonaws.services.ec2.model.ModifySpotFleetRequestRequest; import com.amazonaws.services.ec2.model.ModifySpotFleetRequestResult; import com.amazonaws.services.ec2.model.ModifySubnetAttributeRequest; +import com.amazonaws.services.ec2.model.ModifySubnetAttributeResult; import com.amazonaws.services.ec2.model.ModifyVolumeAttributeRequest; +import com.amazonaws.services.ec2.model.ModifyVolumeAttributeResult; +import com.amazonaws.services.ec2.model.ModifyVolumeRequest; +import com.amazonaws.services.ec2.model.ModifyVolumeResult; import com.amazonaws.services.ec2.model.ModifyVpcAttributeRequest; +import com.amazonaws.services.ec2.model.ModifyVpcAttributeResult; import com.amazonaws.services.ec2.model.ModifyVpcEndpointRequest; import com.amazonaws.services.ec2.model.ModifyVpcEndpointResult; import com.amazonaws.services.ec2.model.MonitorInstancesRequest; @@ -331,34 +439,51 @@ import com.amazonaws.services.ec2.model.PurchaseReservedInstancesOfferingResult; import com.amazonaws.services.ec2.model.PurchaseScheduledInstancesRequest; import com.amazonaws.services.ec2.model.PurchaseScheduledInstancesResult; +import com.amazonaws.services.ec2.model.PurchaseHostReservationRequest; +import com.amazonaws.services.ec2.model.PurchaseHostReservationResult; import com.amazonaws.services.ec2.model.RebootInstancesRequest; +import com.amazonaws.services.ec2.model.RebootInstancesResult; import com.amazonaws.services.ec2.model.RegisterImageRequest; import com.amazonaws.services.ec2.model.RegisterImageResult; import com.amazonaws.services.ec2.model.RejectVpcPeeringConnectionRequest; import com.amazonaws.services.ec2.model.RejectVpcPeeringConnectionResult; +import com.amazonaws.services.ec2.model.ModifyVpcPeeringConnectionOptionsRequest; +import com.amazonaws.services.ec2.model.ModifyVpcPeeringConnectionOptionsResult; import 
com.amazonaws.services.ec2.model.ReleaseAddressRequest; +import com.amazonaws.services.ec2.model.ReleaseAddressResult; import com.amazonaws.services.ec2.model.ReleaseHostsRequest; import com.amazonaws.services.ec2.model.ReleaseHostsResult; +import com.amazonaws.services.ec2.model.ReplaceIamInstanceProfileAssociationRequest; +import com.amazonaws.services.ec2.model.ReplaceIamInstanceProfileAssociationResult; import com.amazonaws.services.ec2.model.ReplaceNetworkAclAssociationRequest; import com.amazonaws.services.ec2.model.ReplaceNetworkAclAssociationResult; import com.amazonaws.services.ec2.model.ReplaceNetworkAclEntryRequest; +import com.amazonaws.services.ec2.model.ReplaceNetworkAclEntryResult; import com.amazonaws.services.ec2.model.ReplaceRouteRequest; +import com.amazonaws.services.ec2.model.ReplaceRouteResult; import com.amazonaws.services.ec2.model.ReplaceRouteTableAssociationRequest; import com.amazonaws.services.ec2.model.ReplaceRouteTableAssociationResult; import com.amazonaws.services.ec2.model.ReportInstanceStatusRequest; +import com.amazonaws.services.ec2.model.ReportInstanceStatusResult; import com.amazonaws.services.ec2.model.RequestSpotFleetRequest; import com.amazonaws.services.ec2.model.RequestSpotFleetResult; import com.amazonaws.services.ec2.model.RequestSpotInstancesRequest; import com.amazonaws.services.ec2.model.RequestSpotInstancesResult; import com.amazonaws.services.ec2.model.Reservation; import com.amazonaws.services.ec2.model.ResetImageAttributeRequest; +import com.amazonaws.services.ec2.model.ResetImageAttributeResult; import com.amazonaws.services.ec2.model.ResetInstanceAttributeRequest; +import com.amazonaws.services.ec2.model.ResetInstanceAttributeResult; import com.amazonaws.services.ec2.model.ResetNetworkInterfaceAttributeRequest; +import com.amazonaws.services.ec2.model.ResetNetworkInterfaceAttributeResult; import com.amazonaws.services.ec2.model.ResetSnapshotAttributeRequest; +import 
com.amazonaws.services.ec2.model.ResetSnapshotAttributeResult; import com.amazonaws.services.ec2.model.RestoreAddressToClassicRequest; import com.amazonaws.services.ec2.model.RestoreAddressToClassicResult; import com.amazonaws.services.ec2.model.RevokeSecurityGroupEgressRequest; +import com.amazonaws.services.ec2.model.RevokeSecurityGroupEgressResult; import com.amazonaws.services.ec2.model.RevokeSecurityGroupIngressRequest; +import com.amazonaws.services.ec2.model.RevokeSecurityGroupIngressResult; import com.amazonaws.services.ec2.model.RunInstancesRequest; import com.amazonaws.services.ec2.model.RunInstancesResult; import com.amazonaws.services.ec2.model.RunScheduledInstancesRequest; @@ -370,9 +495,17 @@ import com.amazonaws.services.ec2.model.Tag; import com.amazonaws.services.ec2.model.TerminateInstancesRequest; import com.amazonaws.services.ec2.model.TerminateInstancesResult; +import com.amazonaws.services.ec2.model.UnassignIpv6AddressesRequest; +import com.amazonaws.services.ec2.model.UnassignIpv6AddressesResult; import com.amazonaws.services.ec2.model.UnassignPrivateIpAddressesRequest; +import com.amazonaws.services.ec2.model.UnassignPrivateIpAddressesResult; import com.amazonaws.services.ec2.model.UnmonitorInstancesRequest; import com.amazonaws.services.ec2.model.UnmonitorInstancesResult; +import com.amazonaws.services.ec2.model.UpdateSecurityGroupRuleDescriptionsEgressRequest; +import com.amazonaws.services.ec2.model.UpdateSecurityGroupRuleDescriptionsEgressResult; +import com.amazonaws.services.ec2.model.UpdateSecurityGroupRuleDescriptionsIngressRequest; +import com.amazonaws.services.ec2.model.UpdateSecurityGroupRuleDescriptionsIngressResult; +import com.amazonaws.services.ec2.waiters.AmazonEC2Waiters; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.ESLoggerFactory; @@ -518,7 +651,13 @@ public void setRegion(Region region) throws IllegalArgumentException { } @Override - public void rebootInstances(RebootInstancesRequest 
rebootInstancesRequest) throws AmazonServiceException, AmazonClientException { + public AcceptReservedInstancesExchangeQuoteResult acceptReservedInstancesExchangeQuote( + AcceptReservedInstancesExchangeQuoteRequest acceptReservedInstancesExchangeQuoteRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public RebootInstancesResult rebootInstances(RebootInstancesRequest rebootInstancesRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -548,7 +687,7 @@ public DetachVolumeResult detachVolume(DetachVolumeRequest detachVolumeRequest) } @Override - public void deleteKeyPair(DeleteKeyPairRequest deleteKeyPairRequest) throws AmazonServiceException, AmazonClientException { + public DeleteKeyPairResult deleteKeyPair(DeleteKeyPairRequest deleteKeyPairRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -562,6 +701,16 @@ public UnmonitorInstancesResult unmonitorInstances(UnmonitorInstancesRequest unm throw new UnsupportedOperationException("Not supported in mock"); } + @Override + public UpdateSecurityGroupRuleDescriptionsIngressResult updateSecurityGroupRuleDescriptionsIngress(UpdateSecurityGroupRuleDescriptionsIngressRequest updateSecurityGroupRuleDescriptionsIngressRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public UpdateSecurityGroupRuleDescriptionsEgressResult updateSecurityGroupRuleDescriptionsEgress(UpdateSecurityGroupRuleDescriptionsEgressRequest updateSecurityGroupRuleDescriptionsEgressRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + @Override public AttachVpnGatewayResult attachVpnGateway(AttachVpnGatewayRequest 
attachVpnGatewayRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); @@ -573,7 +722,7 @@ public CreateImageResult createImage(CreateImageRequest createImageRequest) thro } @Override - public void deleteSecurityGroup(DeleteSecurityGroupRequest deleteSecurityGroupRequest) throws AmazonServiceException, AmazonClientException { + public DeleteSecurityGroupResult deleteSecurityGroup(DeleteSecurityGroupRequest deleteSecurityGroupRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -583,12 +732,12 @@ public CreateInstanceExportTaskResult createInstanceExportTask(CreateInstanceExp } @Override - public void authorizeSecurityGroupEgress(AuthorizeSecurityGroupEgressRequest authorizeSecurityGroupEgressRequest) throws AmazonServiceException, AmazonClientException { + public AuthorizeSecurityGroupEgressResult authorizeSecurityGroupEgress(AuthorizeSecurityGroupEgressRequest authorizeSecurityGroupEgressRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void associateDhcpOptions(AssociateDhcpOptionsRequest associateDhcpOptionsRequest) throws AmazonServiceException, AmazonClientException { + public AssociateDhcpOptionsResult associateDhcpOptions(AssociateDhcpOptionsRequest associateDhcpOptionsRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -597,6 +746,11 @@ public GetPasswordDataResult getPasswordData(GetPasswordDataRequest getPasswordD throw new UnsupportedOperationException("Not supported in mock"); } + @Override + public GetReservedInstancesExchangeQuoteResult getReservedInstancesExchangeQuote(GetReservedInstancesExchangeQuoteRequest getReservedInstancesExchangeQuoteRequest) throws AmazonServiceException, AmazonClientException { + throw new 
UnsupportedOperationException("Not supported in mock"); + } + @Override public StopInstancesResult stopInstances(StopInstancesRequest stopInstancesRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); @@ -608,12 +762,12 @@ public ImportKeyPairResult importKeyPair(ImportKeyPairRequest importKeyPairReque } @Override - public void deleteNetworkInterface(DeleteNetworkInterfaceRequest deleteNetworkInterfaceRequest) throws AmazonServiceException, AmazonClientException { + public DeleteNetworkInterfaceResult deleteNetworkInterface(DeleteNetworkInterfaceRequest deleteNetworkInterfaceRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void modifyVpcAttribute(ModifyVpcAttributeRequest modifyVpcAttributeRequest) throws AmazonServiceException, AmazonClientException { + public ModifyVpcAttributeResult modifyVpcAttribute(ModifyVpcAttributeRequest modifyVpcAttributeRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -637,6 +791,11 @@ public DescribeNetworkInterfacesResult describeNetworkInterfaces(DescribeNetwork throw new UnsupportedOperationException("Not supported in mock"); } + @Override + public DescribeNetworkInterfacePermissionsResult describeNetworkInterfacePermissions(DescribeNetworkInterfacePermissionsRequest describeNetworkInterfacePermissionsRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + @Override public DescribeRegionsResult describeRegions(DescribeRegionsRequest describeRegionsRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); @@ -658,12 +817,12 @@ public DeleteVpcEndpointsResult deleteVpcEndpoints(DeleteVpcEndpointsRequest del } @Override - 
public void resetSnapshotAttribute(ResetSnapshotAttributeRequest resetSnapshotAttributeRequest) throws AmazonServiceException, AmazonClientException { + public ResetSnapshotAttributeResult resetSnapshotAttribute(ResetSnapshotAttributeRequest resetSnapshotAttributeRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void deleteRoute(DeleteRouteRequest deleteRouteRequest) throws AmazonServiceException, AmazonClientException { + public DeleteRouteResult deleteRoute(DeleteRouteRequest deleteRouteRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -683,7 +842,7 @@ public ModifyHostsResult modifyHosts(ModifyHostsRequest modifyHostsRequest) { } @Override - public void modifyIdFormat(ModifyIdFormatRequest modifyIdFormatRequest) { + public ModifyIdFormatResult modifyIdFormat(ModifyIdFormatRequest modifyIdFormatRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @@ -692,23 +851,38 @@ public DescribeSecurityGroupsResult describeSecurityGroups(DescribeSecurityGroup throw new UnsupportedOperationException("Not supported in mock"); } + @Override + public DescribeStaleSecurityGroupsResult describeStaleSecurityGroups(DescribeStaleSecurityGroupsRequest describeStaleSecurityGroupsRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeSecurityGroupReferencesResult describeSecurityGroupReferences(DescribeSecurityGroupReferencesRequest describeSecurityGroupReferencesRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + @Override public RejectVpcPeeringConnectionResult rejectVpcPeeringConnection(RejectVpcPeeringConnectionRequest rejectVpcPeeringConnectionRequest) throws 
AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } + @Override + public ModifyVpcPeeringConnectionOptionsResult modifyVpcPeeringConnectionOptions(ModifyVpcPeeringConnectionOptionsRequest modifyVpcPeeringConnectionOptionsRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + @Override public DeleteFlowLogsResult deleteFlowLogs(DeleteFlowLogsRequest deleteFlowLogsRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void detachVpnGateway(DetachVpnGatewayRequest detachVpnGatewayRequest) throws AmazonServiceException, AmazonClientException { + public DetachVpnGatewayResult detachVpnGateway(DetachVpnGatewayRequest detachVpnGatewayRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void deregisterImage(DeregisterImageRequest deregisterImageRequest) throws AmazonServiceException, AmazonClientException { + public DeregisterImageResult deregisterImage(DeregisterImageRequest deregisterImageRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -718,12 +892,12 @@ public DescribeSpotDatafeedSubscriptionResult describeSpotDatafeedSubscription(D } @Override - public void deleteTags(DeleteTagsRequest deleteTagsRequest) throws AmazonServiceException, AmazonClientException { + public DeleteTagsResult deleteTags(DeleteTagsRequest deleteTagsRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void deleteSubnet(DeleteSubnetRequest deleteSubnetRequest) throws AmazonServiceException, AmazonClientException { + public DeleteSubnetResult deleteSubnet(DeleteSubnetRequest 
deleteSubnetRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -743,7 +917,7 @@ public CreateVpnGatewayResult createVpnGateway(CreateVpnGatewayRequest createVpn } @Override - public void enableVolumeIO(EnableVolumeIORequest enableVolumeIORequest) throws AmazonServiceException, AmazonClientException { + public EnableVolumeIOResult enableVolumeIO(EnableVolumeIORequest enableVolumeIORequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -753,7 +927,7 @@ public MoveAddressToVpcResult moveAddressToVpc(MoveAddressToVpcRequest moveAddre } @Override - public void deleteVpnGateway(DeleteVpnGatewayRequest deleteVpnGatewayRequest) throws AmazonServiceException, AmazonClientException { + public DeleteVpnGatewayResult deleteVpnGateway(DeleteVpnGatewayRequest deleteVpnGatewayRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -767,6 +941,11 @@ public DescribeVolumeStatusResult describeVolumeStatus(DescribeVolumeStatusReque throw new UnsupportedOperationException("Not supported in mock"); } + @Override + public DescribeVolumesModificationsResult describeVolumesModifications(DescribeVolumesModificationsRequest describeVolumesModificationsRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + @Override public DescribeImportSnapshotTasksResult describeImportSnapshotTasks(DescribeImportSnapshotTasksRequest describeImportSnapshotTasksRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); @@ -778,12 +957,12 @@ public DescribeVpnConnectionsResult describeVpnConnections(DescribeVpnConnection } @Override - public void resetImageAttribute(ResetImageAttributeRequest resetImageAttributeRequest) 
throws AmazonServiceException, AmazonClientException { + public ResetImageAttributeResult resetImageAttribute(ResetImageAttributeRequest resetImageAttributeRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void enableVgwRoutePropagation(EnableVgwRoutePropagationRequest enableVgwRoutePropagationRequest) throws AmazonServiceException, AmazonClientException { + public EnableVgwRoutePropagationResult enableVgwRoutePropagation(EnableVgwRoutePropagationRequest enableVgwRoutePropagationRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -793,7 +972,7 @@ public CreateSnapshotResult createSnapshot(CreateSnapshotRequest createSnapshotR } @Override - public void deleteVolume(DeleteVolumeRequest deleteVolumeRequest) throws AmazonServiceException, AmazonClientException { + public DeleteVolumeResult deleteVolume(DeleteVolumeRequest deleteVolumeRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -813,7 +992,12 @@ public CancelSpotFleetRequestsResult cancelSpotFleetRequests(CancelSpotFleetRequ } @Override - public void unassignPrivateIpAddresses(UnassignPrivateIpAddressesRequest unassignPrivateIpAddressesRequest) throws AmazonServiceException, AmazonClientException { + public UnassignPrivateIpAddressesResult unassignPrivateIpAddresses(UnassignPrivateIpAddressesRequest unassignPrivateIpAddressesRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public UnassignIpv6AddressesResult unassignIpv6Addresses(UnassignIpv6AddressesRequest unassignIpv6AddressesRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -823,7 +1007,7 @@ public 
DescribeVpcsResult describeVpcs(DescribeVpcsRequest describeVpcsRequest) } @Override - public void cancelConversionTask(CancelConversionTaskRequest cancelConversionTaskRequest) throws AmazonServiceException, AmazonClientException { + public CancelConversionTaskResult cancelConversionTask(CancelConversionTaskRequest cancelConversionTaskRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -833,12 +1017,27 @@ public AssociateAddressResult associateAddress(AssociateAddressRequest associate } @Override - public void deleteCustomerGateway(DeleteCustomerGatewayRequest deleteCustomerGatewayRequest) throws AmazonServiceException, AmazonClientException { + public AssociateIamInstanceProfileResult associateIamInstanceProfile(AssociateIamInstanceProfileRequest associateIamInstanceRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public AssociateVpcCidrBlockResult associateVpcCidrBlock(AssociateVpcCidrBlockRequest associateVpcCidrBlockRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public AssociateSubnetCidrBlockResult associateSubnetCidrBlock(AssociateSubnetCidrBlockRequest associateSubnetCidrBlockRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void createNetworkAclEntry(CreateNetworkAclEntryRequest createNetworkAclEntryRequest) throws AmazonServiceException, AmazonClientException { + public DeleteCustomerGatewayResult deleteCustomerGateway(DeleteCustomerGatewayRequest deleteCustomerGatewayRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CreateNetworkAclEntryResult 
createNetworkAclEntry(CreateNetworkAclEntryRequest createNetworkAclEntryRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -853,7 +1052,32 @@ public DescribeExportTasksResult describeExportTasks(DescribeExportTasksRequest } @Override - public void detachInternetGateway(DetachInternetGatewayRequest detachInternetGatewayRequest) throws AmazonServiceException, AmazonClientException { + public DescribeElasticGpusResult describeElasticGpus(DescribeElasticGpusRequest describeElasticGpusRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeFpgaImagesResult describeFpgaImages(DescribeFpgaImagesRequest describeFpgaImagesRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeHostReservationOfferingsResult describeHostReservationOfferings(DescribeHostReservationOfferingsRequest describeHostReservationOfferingsRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeHostReservationsResult describeHostReservations(DescribeHostReservationsRequest describeHostReservationsRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeIdentityIdFormatResult describeIdentityIdFormat(DescribeIdentityIdFormatRequest describeIdentityIdFormatRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DetachInternetGatewayResult detachInternetGateway(DetachInternetGatewayRequest detachInternetGatewayRequest) throws AmazonServiceException, AmazonClientException { throw new 
UnsupportedOperationException("Not supported in mock"); } @@ -883,7 +1107,7 @@ public DescribeReservedInstancesListingsResult describeReservedInstancesListings } @Override - public void reportInstanceStatus(ReportInstanceStatusRequest reportInstanceStatusRequest) throws AmazonServiceException, AmazonClientException { + public ReportInstanceStatusResult reportInstanceStatus(ReportInstanceStatusRequest reportInstanceStatusRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -896,6 +1120,12 @@ public DescribeRouteTablesResult describeRouteTables(DescribeRouteTablesRequest public DescribeDhcpOptionsResult describeDhcpOptions(DescribeDhcpOptionsRequest describeDhcpOptionsRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } + + @Override + public DescribeEgressOnlyInternetGatewaysResult describeEgressOnlyInternetGateways( + DescribeEgressOnlyInternetGatewaysRequest describeEgressOnlyInternetGatewaysRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } @Override public MonitorInstancesResult monitorInstances(MonitorInstancesRequest monitorInstancesRequest) throws AmazonServiceException, AmazonClientException { @@ -933,17 +1163,22 @@ public ImportInstanceResult importInstance(ImportInstanceRequest importInstanceR } @Override - public void revokeSecurityGroupIngress(RevokeSecurityGroupIngressRequest revokeSecurityGroupIngressRequest) throws AmazonServiceException, AmazonClientException { + public DeleteVpcPeeringConnectionResult deleteVpcPeeringConnection(DeleteVpcPeeringConnectionRequest deleteVpcPeeringConnectionRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DeleteVpcPeeringConnectionResult 
deleteVpcPeeringConnection(DeleteVpcPeeringConnectionRequest deleteVpcPeeringConnectionRequest) throws AmazonServiceException, AmazonClientException { + public GetConsoleOutputResult getConsoleOutput(GetConsoleOutputRequest getConsoleOutputRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public GetConsoleOutputResult getConsoleOutput(GetConsoleOutputRequest getConsoleOutputRequest) throws AmazonServiceException, AmazonClientException { + public GetConsoleScreenshotResult getConsoleScreenshot(GetConsoleScreenshotRequest getConsoleScreenshotRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public GetHostReservationPurchasePreviewResult getHostReservationPurchasePreview(GetHostReservationPurchasePreviewRequest getHostReservationPurchasePreviewRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -953,17 +1188,17 @@ public CreateInternetGatewayResult createInternetGateway(CreateInternetGatewayRe } @Override - public void deleteVpnConnectionRoute(DeleteVpnConnectionRouteRequest deleteVpnConnectionRouteRequest) throws AmazonServiceException, AmazonClientException { + public DeleteVpnConnectionRouteResult deleteVpnConnectionRoute(DeleteVpnConnectionRouteRequest deleteVpnConnectionRouteRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void detachNetworkInterface(DetachNetworkInterfaceRequest detachNetworkInterfaceRequest) throws AmazonServiceException, AmazonClientException { + public DetachNetworkInterfaceResult detachNetworkInterface(DetachNetworkInterfaceRequest detachNetworkInterfaceRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not 
supported in mock"); } @Override - public void modifyImageAttribute(ModifyImageAttributeRequest modifyImageAttributeRequest) throws AmazonServiceException, AmazonClientException { + public ModifyImageAttributeResult modifyImageAttribute(ModifyImageAttributeRequest modifyImageAttributeRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -972,18 +1207,38 @@ public CreateCustomerGatewayResult createCustomerGateway(CreateCustomerGatewayRe throw new UnsupportedOperationException("Not supported in mock"); } + @Override + public CreateEgressOnlyInternetGatewayResult createEgressOnlyInternetGateway(CreateEgressOnlyInternetGatewayRequest createEgressOnlyInternetGatewayRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CreateFpgaImageResult createFpgaImage(CreateFpgaImageRequest createFpgaImageRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CreateNetworkInterfacePermissionResult createNetworkInterfacePermission(CreateNetworkInterfacePermissionRequest createNetworkInterfacePermissionRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CreateDefaultVpcResult createDefaultVpc(CreateDefaultVpcRequest createDefaultVpcRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + @Override public CreateSpotDatafeedSubscriptionResult createSpotDatafeedSubscription(CreateSpotDatafeedSubscriptionRequest createSpotDatafeedSubscriptionRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void 
attachInternetGateway(AttachInternetGatewayRequest attachInternetGatewayRequest) throws AmazonServiceException, AmazonClientException { + public AttachInternetGatewayResult attachInternetGateway(AttachInternetGatewayRequest attachInternetGatewayRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void deleteVpnConnection(DeleteVpnConnectionRequest deleteVpnConnectionRequest) throws AmazonServiceException, AmazonClientException { + public DeleteVpnConnectionResult deleteVpnConnection(DeleteVpnConnectionRequest deleteVpnConnectionRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1058,12 +1313,12 @@ public AssociateRouteTableResult associateRouteTable(AssociateRouteTableRequest } @Override - public void modifyVolumeAttribute(ModifyVolumeAttributeRequest modifyVolumeAttributeRequest) throws AmazonServiceException, AmazonClientException { + public ModifyVolumeAttributeResult modifyVolumeAttribute(ModifyVolumeAttributeRequest modifyVolumeAttributeRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void deleteNetworkAcl(DeleteNetworkAclRequest deleteNetworkAclRequest) throws AmazonServiceException, AmazonClientException { + public DeleteNetworkAclResult deleteNetworkAcl(DeleteNetworkAclRequest deleteNetworkAclRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1078,7 +1333,7 @@ public StartInstancesResult startInstances(StartInstancesRequest startInstancesR } @Override - public void modifyInstanceAttribute(ModifyInstanceAttributeRequest modifyInstanceAttributeRequest) throws AmazonServiceException, AmazonClientException { + public ModifyInstanceAttributeResult 
modifyInstanceAttribute(ModifyInstanceAttributeRequest modifyInstanceAttributeRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1087,18 +1342,33 @@ public ModifyInstancePlacementResult modifyInstancePlacement(ModifyInstancePlace throw new UnsupportedOperationException("Not supported in mock"); } + @Override + public ModifyIdentityIdFormatResult modifyIdentityIdFormat(ModifyIdentityIdFormatRequest modifyIdentityIdFormatRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + @Override public CancelReservedInstancesListingResult cancelReservedInstancesListing(CancelReservedInstancesListingRequest cancelReservedInstancesListingRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void deleteDhcpOptions(DeleteDhcpOptionsRequest deleteDhcpOptionsRequest) throws AmazonServiceException, AmazonClientException { + public DeleteDhcpOptionsResult deleteDhcpOptions(DeleteDhcpOptionsRequest deleteDhcpOptionsRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void authorizeSecurityGroupIngress(AuthorizeSecurityGroupIngressRequest authorizeSecurityGroupIngressRequest) throws AmazonServiceException, AmazonClientException { + public DeleteEgressOnlyInternetGatewayResult deleteEgressOnlyInternetGateway(DeleteEgressOnlyInternetGatewayRequest deleteEgressOnlyInternetGatewayRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DeleteNetworkInterfacePermissionResult deleteNetworkInterfacePermission(DeleteNetworkInterfacePermissionRequest deleteNetworkInterfacePermissionRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not 
supported in mock"); + } + + @Override + public AuthorizeSecurityGroupIngressResult authorizeSecurityGroupIngress(AuthorizeSecurityGroupIngressRequest authorizeSecurityGroupIngressRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1118,7 +1388,7 @@ public DescribeCustomerGatewaysResult describeCustomerGateways(DescribeCustomerG } @Override - public void cancelExportTask(CancelExportTaskRequest cancelExportTaskRequest) throws AmazonServiceException, AmazonClientException { + public CancelExportTaskResult cancelExportTask(CancelExportTaskRequest cancelExportTaskRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1143,12 +1413,12 @@ public DescribeVpcClassicLinkResult describeVpcClassicLink(DescribeVpcClassicLin } @Override - public void modifyNetworkInterfaceAttribute(ModifyNetworkInterfaceAttributeRequest modifyNetworkInterfaceAttributeRequest) throws AmazonServiceException, AmazonClientException { + public ModifyNetworkInterfaceAttributeResult modifyNetworkInterfaceAttribute(ModifyNetworkInterfaceAttributeRequest modifyNetworkInterfaceAttributeRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void deleteRouteTable(DeleteRouteTableRequest deleteRouteTableRequest) throws AmazonServiceException, AmazonClientException { + public DeleteRouteTableResult deleteRouteTable(DeleteRouteTableRequest deleteRouteTableRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1168,7 +1438,7 @@ public RequestSpotInstancesResult requestSpotInstances(RequestSpotInstancesReque } @Override - public void createTags(CreateTagsRequest createTagsRequest) throws AmazonServiceException, AmazonClientException { + public CreateTagsResult 
createTags(CreateTagsRequest createTagsRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1183,7 +1453,7 @@ public AttachNetworkInterfaceResult attachNetworkInterface(AttachNetworkInterfac } @Override - public void replaceRoute(ReplaceRouteRequest replaceRouteRequest) throws AmazonServiceException, AmazonClientException { + public ReplaceRouteResult replaceRoute(ReplaceRouteRequest replaceRouteRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1198,7 +1468,7 @@ public CancelBundleTaskResult cancelBundleTask(CancelBundleTaskRequest cancelBun } @Override - public void disableVgwRoutePropagation(DisableVgwRoutePropagationRequest disableVgwRoutePropagationRequest) throws AmazonServiceException, AmazonClientException { + public DisableVgwRoutePropagationResult disableVgwRoutePropagation(DisableVgwRoutePropagationRequest disableVgwRoutePropagationRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1228,7 +1498,12 @@ public PurchaseScheduledInstancesResult purchaseScheduledInstances(PurchaseSched } @Override - public void modifySnapshotAttribute(ModifySnapshotAttributeRequest modifySnapshotAttributeRequest) throws AmazonServiceException, AmazonClientException { + public PurchaseHostReservationResult purchaseHostReservation(PurchaseHostReservationRequest purchaseHostReservationRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public ModifySnapshotAttributeResult modifySnapshotAttribute(ModifySnapshotAttributeRequest modifySnapshotAttributeRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1248,12 +1523,12 @@ public ModifyVpcEndpointResult modifyVpcEndpoint(ModifyVpcEndpointRequest modify } 
@Override - public void deleteSpotDatafeedSubscription(DeleteSpotDatafeedSubscriptionRequest deleteSpotDatafeedSubscriptionRequest) throws AmazonServiceException, AmazonClientException { + public DeleteSpotDatafeedSubscriptionResult deleteSpotDatafeedSubscription(DeleteSpotDatafeedSubscriptionRequest deleteSpotDatafeedSubscriptionRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void deleteInternetGateway(DeleteInternetGatewayRequest deleteInternetGatewayRequest) throws AmazonServiceException, AmazonClientException { + public DeleteInternetGatewayResult deleteInternetGateway(DeleteInternetGatewayRequest deleteInternetGatewayRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1288,7 +1563,22 @@ public ConfirmProductInstanceResult confirmProductInstance(ConfirmProductInstanc } @Override - public void disassociateRouteTable(DisassociateRouteTableRequest disassociateRouteTableRequest) throws AmazonServiceException, AmazonClientException { + public DisassociateRouteTableResult disassociateRouteTable(DisassociateRouteTableRequest disassociateRouteTableRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DisassociateIamInstanceProfileResult disassociateIamInstanceProfile(DisassociateIamInstanceProfileRequest disassociateIamInstanceProfileRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DisassociateVpcCidrBlockResult disassociateVpcCidrBlock(DisassociateVpcCidrBlockRequest disassociateVpcCidrBlockRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public 
DisassociateSubnetCidrBlockResult disassociateSubnetCidrBlock(DisassociateSubnetCidrBlockRequest disassociateSubnetCidrBlockRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1298,12 +1588,12 @@ public DescribeVpcAttributeResult describeVpcAttribute(DescribeVpcAttributeReque } @Override - public void revokeSecurityGroupEgress(RevokeSecurityGroupEgressRequest revokeSecurityGroupEgressRequest) throws AmazonServiceException, AmazonClientException { + public RevokeSecurityGroupEgressResult revokeSecurityGroupEgress(RevokeSecurityGroupEgressRequest revokeSecurityGroupEgressRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void deleteNetworkAclEntry(DeleteNetworkAclEntryRequest deleteNetworkAclEntryRequest) throws AmazonServiceException, AmazonClientException { + public DeleteNetworkAclEntryResult deleteNetworkAclEntry(DeleteNetworkAclEntryRequest deleteNetworkAclEntryRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1312,6 +1602,11 @@ public CreateVolumeResult createVolume(CreateVolumeRequest createVolumeRequest) throw new UnsupportedOperationException("Not supported in mock"); } + @Override + public ModifyVolumeResult modifyVolume(ModifyVolumeRequest modifyVolumeRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + @Override public DescribeInstanceStatusResult describeInstanceStatus(DescribeInstanceStatusRequest describeInstanceStatusRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); @@ -1333,7 +1628,12 @@ public DescribeReservedInstancesOfferingsResult describeReservedInstancesOfferin } @Override - public void 
assignPrivateIpAddresses(AssignPrivateIpAddressesRequest assignPrivateIpAddressesRequest) throws AmazonServiceException, AmazonClientException { + public AssignPrivateIpAddressesResult assignPrivateIpAddresses(AssignPrivateIpAddressesRequest assignPrivateIpAddressesRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public AssignIpv6AddressesResult assignIpv6Addresses(AssignIpv6AddressesRequest assignIpv6AddressesRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1343,7 +1643,7 @@ public DescribeSpotFleetRequestHistoryResult describeSpotFleetRequestHistory(Des } @Override - public void deleteSnapshot(DeleteSnapshotRequest deleteSnapshotRequest) throws AmazonServiceException, AmazonClientException { + public DeleteSnapshotResult deleteSnapshot(DeleteSnapshotRequest deleteSnapshotRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1353,12 +1653,12 @@ public ReplaceNetworkAclAssociationResult replaceNetworkAclAssociation(ReplaceNe } @Override - public void disassociateAddress(DisassociateAddressRequest disassociateAddressRequest) throws AmazonServiceException, AmazonClientException { + public DisassociateAddressResult disassociateAddress(DisassociateAddressRequest disassociateAddressRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void createPlacementGroup(CreatePlacementGroupRequest createPlacementGroupRequest) throws AmazonServiceException, AmazonClientException { + public CreatePlacementGroupResult createPlacementGroup(CreatePlacementGroupRequest createPlacementGroupRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ 
-1368,17 +1668,17 @@ public BundleInstanceResult bundleInstance(BundleInstanceRequest bundleInstanceR } @Override - public void deletePlacementGroup(DeletePlacementGroupRequest deletePlacementGroupRequest) throws AmazonServiceException, AmazonClientException { + public DeletePlacementGroupResult deletePlacementGroup(DeletePlacementGroupRequest deletePlacementGroupRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void modifySubnetAttribute(ModifySubnetAttributeRequest modifySubnetAttributeRequest) throws AmazonServiceException, AmazonClientException { + public ModifySubnetAttributeResult modifySubnetAttribute(ModifySubnetAttributeRequest modifySubnetAttributeRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public void deleteVpc(DeleteVpcRequest deleteVpcRequest) throws AmazonServiceException, AmazonClientException { + public DeleteVpcResult deleteVpc(DeleteVpcRequest deleteVpcRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1398,7 +1698,7 @@ public AllocateAddressResult allocateAddress(AllocateAddressRequest allocateAddr } @Override - public void releaseAddress(ReleaseAddressRequest releaseAddressRequest) throws AmazonServiceException, AmazonClientException { + public ReleaseAddressResult releaseAddress(ReleaseAddressRequest releaseAddressRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1408,7 +1708,12 @@ public ReleaseHostsResult releaseHosts(ReleaseHostsRequest releaseHostsRequest) } @Override - public void resetInstanceAttribute(ResetInstanceAttributeRequest resetInstanceAttributeRequest) throws AmazonServiceException, AmazonClientException { + public ReplaceIamInstanceProfileAssociationResult 
replaceIamInstanceProfileAssociation(ReplaceIamInstanceProfileAssociationRequest replaceIamInstanceProfileAssociationRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public ResetInstanceAttributeResult resetInstanceAttribute(ResetInstanceAttributeRequest resetInstanceAttributeRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1423,7 +1728,7 @@ public CreateNatGatewayResult createNatGateway(CreateNatGatewayRequest createNat } @Override - public void replaceNetworkAclEntry(ReplaceNetworkAclEntryRequest replaceNetworkAclEntryRequest) throws AmazonServiceException, AmazonClientException { + public ReplaceNetworkAclEntryResult replaceNetworkAclEntry(ReplaceNetworkAclEntryRequest replaceNetworkAclEntryRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1443,7 +1748,7 @@ public RegisterImageResult registerImage(RegisterImageRequest registerImageReque } @Override - public void resetNetworkInterfaceAttribute(ResetNetworkInterfaceAttributeRequest resetNetworkInterfaceAttributeRequest) throws AmazonServiceException, AmazonClientException { + public ResetNetworkInterfaceAttributeResult resetNetworkInterfaceAttribute(ResetNetworkInterfaceAttributeRequest resetNetworkInterfaceAttributeRequest) throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1458,7 +1763,7 @@ public EnableVpcClassicLinkDnsSupportResult enableVpcClassicLinkDnsSupport(Enabl } @Override - public void createVpnConnectionRoute(CreateVpnConnectionRouteRequest createVpnConnectionRouteRequest) throws AmazonServiceException, AmazonClientException { + public CreateVpnConnectionRouteResult createVpnConnectionRoute(CreateVpnConnectionRouteRequest createVpnConnectionRouteRequest) throws AmazonServiceException, AmazonClientException { 
throw new UnsupportedOperationException("Not supported in mock"); } @@ -1608,7 +1913,12 @@ public DescribeBundleTasksResult describeBundleTasks() throws AmazonServiceExcep } @Override - public void revokeSecurityGroupIngress() throws AmazonServiceException, AmazonClientException { + public RevokeSecurityGroupIngressResult revokeSecurityGroupIngress(RevokeSecurityGroupIngressRequest revokeSecurityGroupIngressRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public RevokeSecurityGroupIngressResult revokeSecurityGroupIngress() throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1652,6 +1962,12 @@ public DescribeHostsResult describeHosts() { throw new UnsupportedOperationException("Not supported in mock"); } + @Override + public DescribeIamInstanceProfileAssociationsResult describeIamInstanceProfileAssociations( + DescribeIamInstanceProfileAssociationsRequest describeIamInstanceProfileAssociationsRequest) throws AmazonServiceException, AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + @Override public DescribeIdFormatResult describeIdFormat(DescribeIdFormatRequest describeIdFormatRequest) { throw new UnsupportedOperationException("Not supported in mock"); @@ -1733,7 +2049,7 @@ public DescribeReservedInstancesModificationsResult describeReservedInstancesMod } @Override - public void deleteSpotDatafeedSubscription() throws AmazonServiceException, AmazonClientException { + public DeleteSpotDatafeedSubscriptionResult deleteSpotDatafeedSubscription() throws AmazonServiceException, AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1797,6 +2113,11 @@ public void shutdown() { throw new UnsupportedOperationException("Not supported in mock"); } + @Override + public AmazonEC2Waiters waiters() { + throw new 
UnsupportedOperationException("Not supported in mock"); + } + @Override public ResponseMetadata getCachedResponseMetadata(AmazonWebServiceRequest request) { throw new UnsupportedOperationException("Not supported in mock"); diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 3264b512b2de9..632fa56e1e9da 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -43,8 +43,6 @@ thirdPartyAudit.excludes = [ ] integTestCluster { - setting 'cloud.azure.storage.my_account_test.account', 'cloudazureresource' - setting 'cloud.azure.storage.my_account_test.key', 'abcdefgh' keystoreSetting 'azure.client.default.account', 'cloudazureresource' keystoreSetting 'azure.client.default.key', 'abcdefgh' keystoreSetting 'azure.client.secondary.account', 'cloudazureresource' diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java index 79455a78c005c..778fe44f15a3c 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java @@ -22,12 +22,8 @@ import com.microsoft.azure.storage.LocationMode; import com.microsoft.azure.storage.StorageException; import org.elasticsearch.common.blobstore.BlobMetaData; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Property; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; import java.io.IOException; import java.io.InputStream; @@ -44,22 +40,6 @@ public interface AzureStorageService { ByteSizeValue MIN_CHUNK_SIZE = new ByteSizeValue(1, 
ByteSizeUnit.BYTES); ByteSizeValue MAX_CHUNK_SIZE = new ByteSizeValue(64, ByteSizeUnit.MB); - final class Storage { - @Deprecated - public static final String PREFIX = "cloud.azure.storage."; - - @Deprecated - public static final Setting STORAGE_ACCOUNTS = Setting.groupSetting(Storage.PREFIX, Setting.Property.NodeScope); - - /** - * Azure timeout (defaults to -1 minute) - * @deprecated We don't want to support global timeout settings anymore - */ - @Deprecated - static final Setting TIMEOUT_SETTING = - Setting.timeSetting("cloud.azure.storage.timeout", TimeValue.timeValueMinutes(-1), Property.NodeScope, Property.Deprecated); - } - boolean doesContainerExist(String account, LocationMode mode, String container); void removeContainer(String account, LocationMode mode, String container) throws URISyntaxException, StorageException; diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java index c928d79c0c242..cefbbf8c606ee 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java @@ -24,6 +24,7 @@ import com.microsoft.azure.storage.RetryExponentialRetry; import com.microsoft.azure.storage.RetryPolicy; import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.BlobListingDetails; import com.microsoft.azure.storage.blob.BlobProperties; import com.microsoft.azure.storage.blob.CloudBlobClient; import com.microsoft.azure.storage.blob.CloudBlobContainer; @@ -32,11 +33,9 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cloud.azure.blobstore.util.SocketAccess; -import org.elasticsearch.common.Strings; import 
org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; import org.elasticsearch.common.collect.MapBuilder; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.RepositoryException; @@ -45,46 +44,26 @@ import java.io.OutputStream; import java.net.URI; import java.net.URISyntaxException; +import java.util.EnumSet; import java.util.HashMap; import java.util.Map; public class AzureStorageServiceImpl extends AbstractComponent implements AzureStorageService { final Map storageSettings; - final Map deprecatedStorageSettings; - final Map clients; + final Map clients = new HashMap<>(); - public AzureStorageServiceImpl(Settings settings, Map regularStorageSettings) { + public AzureStorageServiceImpl(Settings settings, Map storageSettings) { super(settings); - if (regularStorageSettings.isEmpty()) { - this.storageSettings = new HashMap<>(); - // We have deprecated settings so we need to migrate them to the new implementation - Tuple> storageSettingsMapTuple = AzureStorageSettings.loadLegacy(settings); - deprecatedStorageSettings = storageSettingsMapTuple.v2(); - if (storageSettingsMapTuple.v1() != null) { - if (storageSettingsMapTuple.v1().getName().equals("default") == false) { - // We add the primary configuration to the list of all settings with its deprecated name in case someone is - // forcing a specific configuration name when creating the repository instance - deprecatedStorageSettings.put(storageSettingsMapTuple.v1().getName(), storageSettingsMapTuple.v1()); - } - // We add the primary configuration to the list of all settings as the "default" one - deprecatedStorageSettings.put("default", storageSettingsMapTuple.v1()); - } else { - // If someone did not register any settings or deprecated settings, they - // basically can't use the plugin - throw new 
IllegalArgumentException("If you want to use an azure repository, you need to define a client configuration."); - } + this.storageSettings = storageSettings; - - } else { - this.storageSettings = regularStorageSettings; - this.deprecatedStorageSettings = new HashMap<>(); + if (storageSettings.isEmpty()) { + // If someone did not register any settings, they basically can't use the plugin + throw new IllegalArgumentException("If you want to use an azure repository, you need to define a client configuration."); } - this.clients = new HashMap<>(); - logger.debug("starting azure storage client instance"); // We register all regular azure clients @@ -92,12 +71,6 @@ public AzureStorageServiceImpl(Settings settings, Map azureStorageSettingsEntry : this.deprecatedStorageSettings.entrySet()) { - logger.debug("registering deprecated client for account [{}]", azureStorageSettingsEntry.getKey()); - createClient(azureStorageSettingsEntry.getValue()); - } } void createClient(AzureStorageSettings azureStorageSettings) { @@ -123,31 +96,21 @@ void createClient(AzureStorageSettings azureStorageSettings) { } } - CloudBlobClient getSelectedClient(String account, LocationMode mode) { - logger.trace("selecting a client for account [{}], mode [{}]", account, mode.name()); - AzureStorageSettings azureStorageSettings = this.storageSettings.get(account); + CloudBlobClient getSelectedClient(String clientName, LocationMode mode) { + logger.trace("selecting a client named [{}], mode [{}]", clientName, mode.name()); + AzureStorageSettings azureStorageSettings = this.storageSettings.get(clientName); if (azureStorageSettings == null) { - // We can't find a client that has been registered using regular settings so we try deprecated client - azureStorageSettings = this.deprecatedStorageSettings.get(account); - if (azureStorageSettings == null) { - // We did not get an account. That's bad. 
- if (Strings.hasLength(account)) { - throw new IllegalArgumentException("Can not find named azure client [" + account + - "]. Check your elasticsearch.yml."); - } - throw new IllegalArgumentException("Can not find primary/secondary client using deprecated settings. " + - "Check your elasticsearch.yml."); - } + throw new IllegalArgumentException("Can not find named azure client [" + clientName + "]. Check your settings."); } CloudBlobClient client = this.clients.get(azureStorageSettings.getAccount()); if (client == null) { - throw new IllegalArgumentException("Can not find an azure client for account [" + azureStorageSettings.getAccount() + "]"); + throw new IllegalArgumentException("Can not find an azure client named [" + azureStorageSettings.getAccount() + "]"); } // NOTE: for now, just set the location mode in case it is different; - // only one mode per storage account can be active at a time + // only one mode per storage clientName can be active at a time client.getDefaultRequestOptions().setLocationMode(mode); // Set timeout option if the user sets cloud.azure.storage.timeout or cloud.azure.storage.xxx.timeout (it's negative by default) @@ -291,33 +254,26 @@ public Map listBlobsByPrefix(String account, LocationMode logger.debug("listing container [{}], keyPath [{}], prefix [{}]", container, keyPath, prefix); MapBuilder blobsBuilder = MapBuilder.newMapBuilder(); + EnumSet enumBlobListingDetails = EnumSet.of(BlobListingDetails.METADATA); CloudBlobClient client = this.getSelectedClient(account, mode); CloudBlobContainer blobContainer = client.getContainerReference(container); - SocketAccess.doPrivilegedVoidException(() -> { if (blobContainer.exists()) { - for (ListBlobItem blobItem : blobContainer.listBlobs(keyPath + (prefix == null ? "" : prefix))) { + for (ListBlobItem blobItem : blobContainer.listBlobs(keyPath + (prefix == null ? 
"" : prefix), false, + enumBlobListingDetails, null, null)) { URI uri = blobItem.getUri(); logger.trace("blob url [{}]", uri); // uri.getPath is of the form /container/keyPath.* and we want to strip off the /container/ // this requires 1 + container.length() + 1, with each 1 corresponding to one of the / String blobPath = uri.getPath().substring(1 + container.length() + 1); - - CloudBlockBlob blob = blobContainer.getBlockBlobReference(blobPath); - - // fetch the blob attributes from Azure (getBlockBlobReference does not do this) - // this is needed to retrieve the blob length (among other metadata) from Azure Storage - blob.downloadAttributes(); - - BlobProperties properties = blob.getProperties(); + BlobProperties properties = ((CloudBlockBlob) blobItem).getProperties(); String name = blobPath.substring(keyPath.length()); logger.trace("blob url [{}], name [{}], size [{}]", uri, name, properties.getLength()); blobsBuilder.put(name, new PlainBlobMetaData(name, properties.getLength())); } } }); - return blobsBuilder.immutableMap(); } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettings.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettings.java index 5478ba60e0ea5..b33822eee61f0 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettings.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettings.java @@ -20,26 +20,19 @@ package org.elasticsearch.cloud.azure.storage; import com.microsoft.azure.storage.RetryPolicy; -import org.elasticsearch.cloud.azure.storage.AzureStorageService.Storage; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.SecureSetting; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.AffixSetting; import 
org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.unit.TimeValue; -import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.Set; -import static org.elasticsearch.cloud.azure.storage.AzureStorageService.Storage.STORAGE_ACCOUNTS; - public final class AzureStorageSettings { // prefix for azure client settings private static final String PREFIX = "azure.client."; @@ -64,56 +57,20 @@ public final class AzureStorageSettings { key -> SecureSetting.secureString(key, null)); public static final AffixSetting TIMEOUT_SETTING = Setting.affixKeySetting(PREFIX, "timeout", - (key) -> Setting.timeSetting(key, Storage.TIMEOUT_SETTING, Property.NodeScope)); - - - @Deprecated - public static final Setting DEPRECATED_TIMEOUT_SETTING = Setting.affixKeySetting(Storage.PREFIX, "timeout", - (key) -> Setting.timeSetting(key, Storage.TIMEOUT_SETTING, Property.NodeScope, Property.Deprecated)); - @Deprecated - public static final Setting DEPRECATED_ACCOUNT_SETTING = Setting.affixKeySetting(Storage.PREFIX, "account", - (key) -> Setting.simpleString(key, Property.NodeScope, Property.Deprecated)); - @Deprecated - public static final Setting DEPRECATED_KEY_SETTING = Setting.affixKeySetting(Storage.PREFIX, "key", - (key) -> Setting.simpleString(key, Property.NodeScope, Property.Deprecated)); - @Deprecated - public static final Setting DEPRECATED_DEFAULT_SETTING = Setting.affixKeySetting(Storage.PREFIX, "default", - (key) -> Setting.boolSetting(key, false, Property.NodeScope, Property.Deprecated)); - + (key) -> Setting.timeSetting(key, TimeValue.timeValueMinutes(-1), Property.NodeScope)); - @Deprecated - private final String name; private final String account; private final String key; private final TimeValue timeout; - @Deprecated - private final boolean 
activeByDefault; private final int maxRetries; public AzureStorageSettings(String account, String key, TimeValue timeout, int maxRetries) { - this.name = null; - this.account = account; - this.key = key; - this.timeout = timeout; - this.activeByDefault = false; - this.maxRetries = maxRetries; - } - - @Deprecated - public AzureStorageSettings(String name, String account, String key, TimeValue timeout, boolean activeByDefault, int maxRetries) { - this.name = name; this.account = account; this.key = key; this.timeout = timeout; - this.activeByDefault = activeByDefault; this.maxRetries = maxRetries; } - @Deprecated - public String getName() { - return name; - } - public String getKey() { return key; } @@ -126,11 +83,6 @@ public TimeValue getTimeout() { return timeout; } - @Deprecated - public Boolean isActiveByDefault() { - return activeByDefault; - } - public int getMaxRetries() { return maxRetries; } @@ -138,27 +90,14 @@ public int getMaxRetries() { @Override public String toString() { final StringBuilder sb = new StringBuilder("AzureStorageSettings{"); - sb.append("name='").append(name).append('\''); sb.append(", account='").append(account).append('\''); sb.append(", key='").append(key).append('\''); - sb.append(", activeByDefault='").append(activeByDefault).append('\''); sb.append(", timeout=").append(timeout); sb.append(", maxRetries=").append(maxRetries); sb.append('}'); return sb.toString(); } - /** - * Parses settings and read all legacy settings available under cloud.azure.storage.* - * @param settings settings to parse - * @return A tuple with v1 = primary storage and v2 = secondary storage - */ - @Deprecated - public static Tuple> loadLegacy(Settings settings) { - List storageSettings = createStorageSettingsDeprecated(settings); - return Tuple.tuple(getPrimary(storageSettings), getSecondaries(storageSettings)); - } - /** * Parses settings and read all settings available under azure.client.* * @param settings settings to parse @@ -192,25 +131,6 @@ static 
AzureStorageSettings getClientSettings(Settings settings, String clientNa } } - @Deprecated - private static List createStorageSettingsDeprecated(Settings settings) { - // ignore global timeout which has the same prefix but does not belong to any group - Settings groups = STORAGE_ACCOUNTS.get(settings.filter((k) -> k.equals(Storage.TIMEOUT_SETTING.getKey()) == false)); - List storageSettings = new ArrayList<>(); - for (String groupName : groups.getAsGroups().keySet()) { - storageSettings.add( - new AzureStorageSettings( - groupName, - getValue(settings, groupName, DEPRECATED_ACCOUNT_SETTING), - getValue(settings, groupName, DEPRECATED_KEY_SETTING), - getValue(settings, groupName, DEPRECATED_TIMEOUT_SETTING), - getValue(settings, groupName, DEPRECATED_DEFAULT_SETTING), - getValue(settings, groupName, MAX_RETRIES_SETTING)) - ); - } - return storageSettings; - } - private static T getConfigValue(Settings settings, String clientName, Setting.AffixSetting clientSetting) { Setting concreteSetting = clientSetting.getConcreteSettingForNamespace(clientName); @@ -222,45 +142,4 @@ public static T getValue(Settings settings, String groupName, Setting set String fullKey = k.toConcreteKey(groupName).toString(); return setting.getConcreteSetting(fullKey).get(settings); } - - @Deprecated - private static AzureStorageSettings getPrimary(List settings) { - if (settings.isEmpty()) { - return null; - } else if (settings.size() == 1) { - // the only storage settings belong (implicitly) to the default primary storage - AzureStorageSettings storage = settings.get(0); - return new AzureStorageSettings(storage.getName(), storage.getAccount(), storage.getKey(), storage.getTimeout(), true, - storage.getMaxRetries()); - } else { - AzureStorageSettings primary = null; - for (AzureStorageSettings setting : settings) { - if (setting.isActiveByDefault()) { - if (primary == null) { - primary = setting; - } else { - throw new SettingsException("Multiple default Azure data stores configured: [" + 
primary.getName() + "] and [" + setting.getName() + "]"); - } - } - } - if (primary == null) { - throw new SettingsException("No default Azure data store configured"); - } - return primary; - } - } - - @Deprecated - private static Map getSecondaries(List settings) { - Map secondaries = new HashMap<>(); - // when only one setting is defined, we don't have secondaries - if (settings.size() > 1) { - for (AzureStorageSettings setting : settings) { - if (setting.isActiveByDefault() == false) { - secondaries.put(setting.getName(), setting); - } - } - } - return secondaries; - } } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java b/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java index b90d44264ecf7..2816aa963fa59 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java @@ -62,16 +62,9 @@ public Map getRepositories(Environment env, NamedXCo @Override public List> getSettings() { return Arrays.asList( - AzureStorageService.Storage.STORAGE_ACCOUNTS, AzureStorageSettings.ACCOUNT_SETTING, AzureStorageSettings.KEY_SETTING, AzureStorageSettings.TIMEOUT_SETTING ); } - - @Override - public List getSettingsFilter() { - // Cloud storage API settings using a pattern needed to be hidden - return Arrays.asList(AzureStorageService.Storage.PREFIX + "*.account", AzureStorageService.Storage.PREFIX + "*.key"); - } } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceTests.java index b232ee12e05c4..68c2186d9855e 100644 --- 
a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceTests.java @@ -23,7 +23,6 @@ import com.microsoft.azure.storage.RetryExponentialRetry; import com.microsoft.azure.storage.blob.CloudBlobClient; import org.elasticsearch.common.settings.MockSecureSettings; -import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; @@ -32,11 +31,6 @@ import java.util.Map; import static org.elasticsearch.cloud.azure.storage.AzureStorageServiceImpl.blobNameFromUri; -import static org.elasticsearch.cloud.azure.storage.AzureStorageSettings.DEPRECATED_ACCOUNT_SETTING; -import static org.elasticsearch.cloud.azure.storage.AzureStorageSettings.DEPRECATED_DEFAULT_SETTING; -import static org.elasticsearch.cloud.azure.storage.AzureStorageSettings.DEPRECATED_KEY_SETTING; -import static org.elasticsearch.cloud.azure.storage.AzureStorageSettings.DEPRECATED_TIMEOUT_SETTING; -import static org.elasticsearch.repositories.azure.AzureSettingsParserTests.getConcreteSetting; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; @@ -45,18 +39,6 @@ public class AzureStorageServiceTests extends ESTestCase { - @Deprecated - static final Settings deprecatedSettings = Settings.builder() - .put("cloud.azure.storage.azure1.account", "myaccount1") - .put("cloud.azure.storage.azure1.key", "mykey1") - .put("cloud.azure.storage.azure1.default", true) - .put("cloud.azure.storage.azure2.account", "myaccount2") - .put("cloud.azure.storage.azure2.key", "mykey2") - .put("cloud.azure.storage.azure3.account", "myaccount3") - .put("cloud.azure.storage.azure3.key", "mykey3") - .put("cloud.azure.storage.azure3.timeout", "30s") - .build(); - private MockSecureSettings buildSecureSettings() { 
MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setString("azure.client.azure1.account", "myaccount1"); @@ -102,23 +84,7 @@ public void testGetSelectedClientNonExisting() { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { azureStorageService.getSelectedClient("azure4", LocationMode.PRIMARY_ONLY); }); - assertThat(e.getMessage(), is("Can not find named azure client [azure4]. Check your elasticsearch.yml.")); - } - - public void testGetSelectedClientGlobalTimeout() { - Settings timeoutSettings = Settings.builder() - .setSecureSettings(buildSecureSettings()) - .put(AzureStorageService.Storage.TIMEOUT_SETTING.getKey(), "10s") - .put("azure.client.azure3.timeout", "30s") - .build(); - - AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMock(timeoutSettings); - CloudBlobClient client1 = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY); - assertThat(client1.getDefaultRequestOptions().getTimeoutIntervalInMs(), is(10 * 1000)); - CloudBlobClient client3 = azureStorageService.getSelectedClient("azure3", LocationMode.PRIMARY_ONLY); - assertThat(client3.getDefaultRequestOptions().getTimeoutIntervalInMs(), is(30 * 1000)); - - assertSettingDeprecationsAndWarnings(new Setting[]{AzureStorageService.Storage.TIMEOUT_SETTING}); + assertThat(e.getMessage(), is("Can not find named azure client [azure4]. 
Check your settings.")); } public void testGetSelectedClientDefaultTimeout() { @@ -170,7 +136,7 @@ class AzureStorageServiceMock extends AzureStorageServiceImpl { @Override void createClient(AzureStorageSettings azureStorageSettings) { this.clients.put(azureStorageSettings.getAccount(), - new CloudBlobClient(URI.create("https://" + azureStorageSettings.getName()))); + new CloudBlobClient(URI.create("https://" + azureStorageSettings.getAccount()))); } } @@ -184,87 +150,4 @@ public void testBlobNameFromUri() throws URISyntaxException { name = blobNameFromUri(new URI("https://127.0.0.1/container/path/to/myfile")); assertThat(name, is("path/to/myfile")); } - - // Deprecated settings. We still test them until we remove definitely the deprecated settings - - @Deprecated - public void testGetSelectedClientWithNoSecondary() { - AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMock(Settings.builder() - .put("cloud.azure.storage.azure1.account", "myaccount1") - .put("cloud.azure.storage.azure1.key", "mykey1") - .build()); - CloudBlobClient client = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY); - assertThat(client.getEndpoint(), is(URI.create("https://azure1"))); - assertSettingDeprecationsAndWarnings(new Setting[]{ - getConcreteSetting(DEPRECATED_ACCOUNT_SETTING, "azure1"), - getConcreteSetting(DEPRECATED_KEY_SETTING, "azure1") - }); - } - - @Deprecated - public void testGetDefaultClientWithNoSecondary() { - AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMock(Settings.builder() - .put("cloud.azure.storage.azure1.account", "myaccount1") - .put("cloud.azure.storage.azure1.key", "mykey1") - .build()); - CloudBlobClient client = azureStorageService.getSelectedClient("default", LocationMode.PRIMARY_ONLY); - assertThat(client.getEndpoint(), is(URI.create("https://azure1"))); - assertSettingDeprecationsAndWarnings(new Setting[]{ - getConcreteSetting(DEPRECATED_ACCOUNT_SETTING, "azure1"), - 
getConcreteSetting(DEPRECATED_KEY_SETTING, "azure1") - }); - } - - @Deprecated - public void testGetSelectedClientPrimary() { - AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMock(deprecatedSettings); - CloudBlobClient client = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY); - assertThat(client.getEndpoint(), is(URI.create("https://azure1"))); - assertDeprecatedWarnings(); - } - - @Deprecated - public void testGetSelectedClientSecondary1() { - AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMock(deprecatedSettings); - CloudBlobClient client = azureStorageService.getSelectedClient("azure2", LocationMode.PRIMARY_ONLY); - assertThat(client.getEndpoint(), is(URI.create("https://azure2"))); - assertDeprecatedWarnings(); - } - - @Deprecated - public void testGetSelectedClientSecondary2() { - AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMock(deprecatedSettings); - CloudBlobClient client = azureStorageService.getSelectedClient("azure3", LocationMode.PRIMARY_ONLY); - assertThat(client.getEndpoint(), is(URI.create("https://azure3"))); - assertDeprecatedWarnings(); - } - - @Deprecated - public void testGetDefaultClientWithPrimaryAndSecondaries() { - AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMock(deprecatedSettings); - CloudBlobClient client = azureStorageService.getSelectedClient("default", LocationMode.PRIMARY_ONLY); - assertThat(client.getEndpoint(), is(URI.create("https://azure1"))); - assertDeprecatedWarnings(); - } - - @Deprecated - public void testGetSelectedClientDefault() { - AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMock(deprecatedSettings); - CloudBlobClient client = azureStorageService.getSelectedClient("default", LocationMode.PRIMARY_ONLY); - assertThat(client.getEndpoint(), is(URI.create("https://azure1"))); - assertDeprecatedWarnings(); - } - - private void assertDeprecatedWarnings() { - 
assertSettingDeprecationsAndWarnings(new Setting[]{ - getConcreteSetting(DEPRECATED_ACCOUNT_SETTING, "azure1"), - getConcreteSetting(DEPRECATED_KEY_SETTING, "azure1"), - getConcreteSetting(DEPRECATED_DEFAULT_SETTING, "azure1"), - getConcreteSetting(DEPRECATED_ACCOUNT_SETTING, "azure2"), - getConcreteSetting(DEPRECATED_KEY_SETTING, "azure2"), - getConcreteSetting(DEPRECATED_ACCOUNT_SETTING, "azure3"), - getConcreteSetting(DEPRECATED_KEY_SETTING, "azure3"), - getConcreteSetting(DEPRECATED_TIMEOUT_SETTING, "azure3") - }); - } } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettingsFilterTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettingsFilterTests.java deleted file mode 100644 index 17b43715253c8..0000000000000 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettingsFilterTests.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.cloud.azure.storage; - -import org.elasticsearch.common.inject.ModuleTestCase; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsFilter; -import org.elasticsearch.common.settings.SettingsModule; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.plugin.repository.azure.AzureRepositoryPlugin; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.rest.FakeRestRequest; - -import java.io.IOException; - -import static org.hamcrest.Matchers.contains; - -/** - * TODO as we moved credentials to secure settings, we don't need anymore to keep this test in 7.x - */ -public class AzureStorageSettingsFilterTests extends ESTestCase { - static final Settings settings = Settings.builder() - .put("cloud.azure.storage.azure1.account", "myaccount1") - .put("cloud.azure.storage.azure1.key", "mykey1") - .put("cloud.azure.storage.azure1.default", true) - .put("cloud.azure.storage.azure2.account", "myaccount2") - .put("cloud.azure.storage.azure2.key", "mykey2") - .put("cloud.azure.storage.azure3.account", "myaccount3") - .put("cloud.azure.storage.azure3.key", "mykey3") - .build(); - - public void testSettingsFiltering() throws IOException { - AzureRepositoryPlugin p = new AzureRepositoryPlugin(settings); - SettingsModule module = new SettingsModule(Settings.EMPTY, p.getSettings(), p.getSettingsFilter()); - SettingsFilter settingsFilter = ModuleTestCase.bindAndGetInstance(module, SettingsFilter.class); - - // Test using direct filtering - Settings filteredSettings = settingsFilter.filter(settings); - assertThat(filteredSettings.getAsMap().keySet(), contains("cloud.azure.storage.azure1.default")); - - // Test using toXContent filtering - RestRequest request = new FakeRestRequest(); - settingsFilter.addFilterSettingParams(request); - XContentBuilder 
xContentBuilder = XContentBuilder.builder(JsonXContent.jsonXContent); - xContentBuilder.startObject(); - settings.toXContent(xContentBuilder, request); - xContentBuilder.endObject(); - String filteredSettingsString = xContentBuilder.string(); - filteredSettings = Settings.builder().loadFromSource(filteredSettingsString, xContentBuilder.contentType()).build(); - assertThat(filteredSettings.getAsMap().keySet(), contains("cloud.azure.storage.azure1.default")); - } - -} diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSettingsParserTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSettingsParserTests.java deleted file mode 100644 index d0fbdb98e0315..0000000000000 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSettingsParserTests.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.repositories.azure; - -import org.elasticsearch.cloud.azure.storage.AzureStorageSettings; -import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsException; -import org.elasticsearch.test.ESTestCase; - -import java.util.Map; - -import static org.elasticsearch.cloud.azure.storage.AzureStorageSettings.DEPRECATED_ACCOUNT_SETTING; -import static org.elasticsearch.cloud.azure.storage.AzureStorageSettings.DEPRECATED_DEFAULT_SETTING; -import static org.elasticsearch.cloud.azure.storage.AzureStorageSettings.DEPRECATED_KEY_SETTING; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; - -public class AzureSettingsParserTests extends ESTestCase { - - public void testParseTwoSettingsExplicitDefault() { - Settings settings = Settings.builder() - .put("cloud.azure.storage.azure1.account", "myaccount1") - .put("cloud.azure.storage.azure1.key", "mykey1") - .put("cloud.azure.storage.azure1.default", true) - .put("cloud.azure.storage.azure2.account", "myaccount2") - .put("cloud.azure.storage.azure2.key", "mykey2") - .build(); - - Tuple> tuple = AzureStorageSettings.loadLegacy(settings); - assertThat(tuple.v1(), notNullValue()); - assertThat(tuple.v1().getAccount(), is("myaccount1")); - assertThat(tuple.v1().getKey(), is("mykey1")); - assertThat(tuple.v2().keySet(), hasSize(1)); - assertThat(tuple.v2().get("azure2"), notNullValue()); - assertThat(tuple.v2().get("azure2").getAccount(), is("myaccount2")); - assertThat(tuple.v2().get("azure2").getKey(), is("mykey2")); - assertSettingDeprecationsAndWarnings(new Setting[]{ - getConcreteSetting(DEPRECATED_ACCOUNT_SETTING, "azure1"), - getConcreteSetting(DEPRECATED_KEY_SETTING, "azure1"), - 
getConcreteSetting(DEPRECATED_DEFAULT_SETTING, "azure1"), - getConcreteSetting(DEPRECATED_ACCOUNT_SETTING, "azure2"), - getConcreteSetting(DEPRECATED_KEY_SETTING, "azure2") - }); - } - - public void testParseUniqueSettings() { - Settings settings = Settings.builder() - .put("cloud.azure.storage.azure1.account", "myaccount1") - .put("cloud.azure.storage.azure1.key", "mykey1") - .build(); - - Tuple> tuple = AzureStorageSettings.loadLegacy(settings); - assertThat(tuple.v1(), notNullValue()); - assertThat(tuple.v1().getAccount(), is("myaccount1")); - assertThat(tuple.v1().getKey(), is("mykey1")); - assertThat(tuple.v2().keySet(), hasSize(0)); - assertSettingDeprecationsAndWarnings(new Setting[]{ - getConcreteSetting(DEPRECATED_ACCOUNT_SETTING, "azure1"), - getConcreteSetting(DEPRECATED_KEY_SETTING, "azure1") - }); - } - - public void testParseTwoSettingsNoDefault() { - Settings settings = Settings.builder() - .put("cloud.azure.storage.azure1.account", "myaccount1") - .put("cloud.azure.storage.azure1.key", "mykey1") - .put("cloud.azure.storage.azure2.account", "myaccount2") - .put("cloud.azure.storage.azure2.key", "mykey2") - .build(); - - try { - AzureStorageSettings.loadLegacy(settings); - fail("Should have failed with a SettingsException (no default data store)"); - } catch (SettingsException ex) { - assertEquals(ex.getMessage(), "No default Azure data store configured"); - } - assertSettingDeprecationsAndWarnings(new Setting[]{ - getConcreteSetting(DEPRECATED_ACCOUNT_SETTING, "azure1"), - getConcreteSetting(DEPRECATED_KEY_SETTING, "azure1"), - getConcreteSetting(DEPRECATED_ACCOUNT_SETTING, "azure2"), - getConcreteSetting(DEPRECATED_KEY_SETTING, "azure2"), - }); - } - - public void testParseTwoSettingsTooManyDefaultSet() { - Settings settings = Settings.builder() - .put("cloud.azure.storage.azure1.account", "myaccount1") - .put("cloud.azure.storage.azure1.key", "mykey1") - .put("cloud.azure.storage.azure1.default", true) - .put("cloud.azure.storage.azure2.account", 
"myaccount2") - .put("cloud.azure.storage.azure2.key", "mykey2") - .put("cloud.azure.storage.azure2.default", true) - .build(); - - try { - AzureStorageSettings.loadLegacy(settings); - fail("Should have failed with a SettingsException (multiple default data stores)"); - } catch (SettingsException ex) { - assertEquals(ex.getMessage(), "Multiple default Azure data stores configured: [azure1] and [azure2]"); - } - assertSettingDeprecationsAndWarnings(new Setting[]{ - getConcreteSetting(DEPRECATED_ACCOUNT_SETTING, "azure1"), - getConcreteSetting(DEPRECATED_KEY_SETTING, "azure1"), - getConcreteSetting(DEPRECATED_DEFAULT_SETTING, "azure1"), - getConcreteSetting(DEPRECATED_ACCOUNT_SETTING, "azure2"), - getConcreteSetting(DEPRECATED_KEY_SETTING, "azure2"), - getConcreteSetting(DEPRECATED_DEFAULT_SETTING, "azure2") - }); - } - - public void testParseEmptySettings() { - Tuple> tuple = AzureStorageSettings.loadLegacy(Settings.EMPTY); - assertThat(tuple.v1(), nullValue()); - assertThat(tuple.v2().keySet(), hasSize(0)); - } - - public static Setting getConcreteSetting(Setting setting, String groupName) { - Setting.AffixKey k = (Setting.AffixKey) setting.getRawKey(); - String concreteKey = k.toConcreteKey(groupName).toString(); - return setting.getConcreteSetting(concreteKey); - } -} diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java index aea47f38ef3ef..7eb808e7c956e 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java @@ -36,18 +36,24 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; +import 
org.elasticsearch.plugin.repository.azure.AzureRepositoryPlugin; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoryMissingException; import org.elasticsearch.repositories.RepositoryVerificationException; import org.elasticsearch.repositories.azure.AzureRepository.Repository; import org.elasticsearch.snapshots.SnapshotMissingException; +import org.elasticsearch.snapshots.SnapshotRestoreException; import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.store.MockFSDirectoryService; +import org.elasticsearch.test.store.MockFSIndexStore; import org.junit.After; import org.junit.Before; import java.net.URISyntaxException; +import java.util.Arrays; +import java.util.Collection; import java.util.Locale; import java.util.concurrent.TimeUnit; @@ -65,13 +71,24 @@ supportsDedicatedMasters = false, numDataNodes = 1, transportClientRatio = 0.0) public class AzureSnapshotRestoreTests extends AbstractAzureWithThirdPartyIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(AzureRepositoryPlugin.class, MockFSIndexStore.TestPlugin.class); + } + private String getRepositoryPath() { String testName = "it-" + getTestName(); return testName.contains(" ") ? Strings.split(testName, " ")[0] : testName; } public static String getContainerName() { - String testName = "snapshot-itest-".concat(RandomizedTest.getContext().getRunnerSeedAsString().toLowerCase(Locale.ROOT)); + /* Have a different name per test so that there is no possible race condition. As the long can be negative, + * there mustn't be a hyphen between the 2 concatenated numbers + * (can't have 2 consecutives hyphens on Azure containers) + */ + String testName = "snapshot-itest-" + .concat(RandomizedTest.getContext().getRunnerSeedAsString().toLowerCase(Locale.ROOT)); return testName.contains(" ") ? 
Strings.split(testName, " ")[0] : testName; } @@ -95,9 +112,10 @@ public final void wipeAzureRepositories() throws StorageException, URISyntaxExce } public void testSimpleWorkflow() { + String repo_name = "test-repo-simple"; Client client = client(); logger.info("--> creating azure repository with path [{}]", getRepositoryPath()); - PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo") + PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository(repo_name) .setType("azure").setSettings(Settings.builder() .put(Repository.CONTAINER_SETTING.getKey(), getContainerName()) .put(Repository.BASE_PATH_SETTING.getKey(), getRepositoryPath()) @@ -120,13 +138,13 @@ public void testSimpleWorkflow() { assertThat(client.prepareSearch("test-idx-3").setSize(0).get().getHits().getTotalHits(), equalTo(100L)); logger.info("--> snapshot"); - CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") + CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot(repo_name, "test-snap") .setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-3").get(); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); - assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots() + assertThat(client.admin().cluster().prepareGetSnapshots(repo_name).setSnapshots("test-snap").get().getSnapshots() .get(0).state(), equalTo(SnapshotState.SUCCESS)); logger.info("--> delete some data"); @@ -148,7 +166,7 @@ public void testSimpleWorkflow() { client.admin().indices().prepareClose("test-idx-1", "test-idx-2").get(); logger.info("--> restore all indices from the snapshot"); - RestoreSnapshotResponse 
restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap") + RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot(repo_name, "test-snap") .setWaitForCompletion(true).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); @@ -161,7 +179,7 @@ public void testSimpleWorkflow() { logger.info("--> delete indices"); cluster().wipeIndices("test-idx-1", "test-idx-2"); logger.info("--> restore one index after deletion"); - restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true) + restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot(repo_name, "test-snap").setWaitForCompletion(true) .setIndices("test-idx-*", "-test-idx-2").get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); ensureGreen(); @@ -177,7 +195,7 @@ public void testSimpleWorkflow() { public void testMultipleSnapshots() throws URISyntaxException, StorageException { final String indexName = "test-idx-1"; final String typeName = "doc"; - final String repositoryName = "test-repo"; + final String repositoryName = "test-repo-multiple-snapshot"; final String snapshot1Name = "test-snap-1"; final String snapshot2Name = "test-snap-2"; @@ -314,6 +332,7 @@ public void testMultipleRepositories() { * For issue #26: https://github.com/elastic/elasticsearch-cloud-azure/issues/26 */ public void testListBlobs_26() throws StorageException, URISyntaxException { + final String repositoryName="test-repo-26"; createIndex("test-idx-1", "test-idx-2", "test-idx-3"); ensureGreen(); @@ -327,29 +346,29 @@ public void testListBlobs_26() throws StorageException, URISyntaxException { ClusterAdminClient client = client().admin().cluster(); logger.info("--> creating azure repository without any path"); - PutRepositoryResponse putRepositoryResponse = 
client.preparePutRepository("test-repo").setType("azure") + PutRepositoryResponse putRepositoryResponse = client.preparePutRepository(repositoryName).setType("azure") .setSettings(Settings.builder() .put(Repository.CONTAINER_SETTING.getKey(), getContainerName()) ).get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); // Get all snapshots - should be empty - assertThat(client.prepareGetSnapshots("test-repo").get().getSnapshots().size(), equalTo(0)); + assertThat(client.prepareGetSnapshots(repositoryName).get().getSnapshots().size(), equalTo(0)); logger.info("--> snapshot"); - CreateSnapshotResponse createSnapshotResponse = client.prepareCreateSnapshot("test-repo", "test-snap-26") + CreateSnapshotResponse createSnapshotResponse = client.prepareCreateSnapshot(repositoryName, "test-snap-26") .setWaitForCompletion(true).setIndices("test-idx-*").get(); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); // Get all snapshots - should have one - assertThat(client.prepareGetSnapshots("test-repo").get().getSnapshots().size(), equalTo(1)); + assertThat(client.prepareGetSnapshots(repositoryName).get().getSnapshots().size(), equalTo(1)); // Clean the snapshot - client.prepareDeleteSnapshot("test-repo", "test-snap-26").get(); - client.prepareDeleteRepository("test-repo").get(); + client.prepareDeleteSnapshot(repositoryName, "test-snap-26").get(); + client.prepareDeleteRepository(repositoryName).get(); logger.info("--> creating azure repository path [{}]", getRepositoryPath()); - putRepositoryResponse = client.preparePutRepository("test-repo").setType("azure") + putRepositoryResponse = client.preparePutRepository(repositoryName).setType("azure") .setSettings(Settings.builder() .put(Repository.CONTAINER_SETTING.getKey(), getContainerName()) .put(Repository.BASE_PATH_SETTING.getKey(), getRepositoryPath()) @@ -357,15 +376,15 @@ public void testListBlobs_26() throws StorageException, URISyntaxException { 
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); // Get all snapshots - should be empty - assertThat(client.prepareGetSnapshots("test-repo").get().getSnapshots().size(), equalTo(0)); + assertThat(client.prepareGetSnapshots(repositoryName).get().getSnapshots().size(), equalTo(0)); logger.info("--> snapshot"); - createSnapshotResponse = client.prepareCreateSnapshot("test-repo", "test-snap-26").setWaitForCompletion(true) + createSnapshotResponse = client.prepareCreateSnapshot(repositoryName, "test-snap-26").setWaitForCompletion(true) .setIndices("test-idx-*").get(); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); // Get all snapshots - should have one - assertThat(client.prepareGetSnapshots("test-repo").get().getSnapshots().size(), equalTo(1)); + assertThat(client.prepareGetSnapshots(repositoryName).get().getSnapshots().size(), equalTo(1)); } @@ -374,23 +393,24 @@ public void testListBlobs_26() throws StorageException, URISyntaxException { * For issue #28: https://github.com/elastic/elasticsearch-cloud-azure/issues/28 */ public void testGetDeleteNonExistingSnapshot_28() throws StorageException, URISyntaxException { + final String repositoryName="test-repo-28"; ClusterAdminClient client = client().admin().cluster(); logger.info("--> creating azure repository without any path"); - PutRepositoryResponse putRepositoryResponse = client.preparePutRepository("test-repo").setType("azure") + PutRepositoryResponse putRepositoryResponse = client.preparePutRepository(repositoryName).setType("azure") .setSettings(Settings.builder() .put(Repository.CONTAINER_SETTING.getKey(), getContainerName()) ).get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); try { - client.prepareGetSnapshots("test-repo").addSnapshots("nonexistingsnapshotname").get(); + client.prepareGetSnapshots(repositoryName).addSnapshots("nonexistingsnapshotname").get(); fail("Shouldn't be here"); } catch (SnapshotMissingException ex) { 
// Expected } try { - client.prepareDeleteSnapshot("test-repo", "nonexistingsnapshotname").get(); + client.prepareDeleteSnapshot(repositoryName, "nonexistingsnapshotname").get(); fail("Shouldn't be here"); } catch (SnapshotMissingException ex) { // Expected @@ -419,18 +439,19 @@ public void testForbiddenContainerName() throws Exception { * @param correct Is this container name correct */ private void checkContainerName(final String container, final boolean correct) throws Exception { + String repositoryName = "test-repo-checkContainerName"; logger.info("--> creating azure repository with container name [{}]", container); // It could happen that we just removed from a previous test the same container so // we can not create it yet. assertBusy(() -> { try { - PutRepositoryResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("test-repo") + PutRepositoryResponse putRepositoryResponse = client().admin().cluster().preparePutRepository(repositoryName) .setType("azure").setSettings(Settings.builder() .put(Repository.CONTAINER_SETTING.getKey(), container) .put(Repository.BASE_PATH_SETTING.getKey(), getRepositoryPath()) .put(Repository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(1000, 10000), ByteSizeUnit.BYTES) ).get(); - client().admin().cluster().prepareDeleteRepository("test-repo").get(); + client().admin().cluster().prepareDeleteRepository(repositoryName).get(); try { logger.info("--> remove container [{}]", container); cleanRepositoryFiles(container); @@ -451,9 +472,10 @@ private void checkContainerName(final String container, final boolean correct) t * Test case for issue #23: https://github.com/elastic/elasticsearch-cloud-azure/issues/23 */ public void testNonExistingRepo_23() { + final String repositoryName = "test-repo-test23"; Client client = client(); logger.info("--> creating azure repository with path [{}]", getRepositoryPath()); - PutRepositoryResponse putRepositoryResponse = 
client.admin().cluster().preparePutRepository("test-repo") + PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository(repositoryName) .setType("azure").setSettings(Settings.builder() .put(Repository.CONTAINER_SETTING.getKey(), getContainerName()) .put(Repository.BASE_PATH_SETTING.getKey(), getRepositoryPath()) @@ -463,9 +485,9 @@ public void testNonExistingRepo_23() { logger.info("--> restore non existing snapshot"); try { - client.admin().cluster().prepareRestoreSnapshot("test-repo", "no-existing-snapshot").setWaitForCompletion(true).get(); + client.admin().cluster().prepareRestoreSnapshot(repositoryName, "no-existing-snapshot").setWaitForCompletion(true).get(); fail("Shouldn't be here"); - } catch (SnapshotMissingException ex) { + } catch (SnapshotRestoreException ex) { // Expected } } @@ -475,9 +497,8 @@ public void testNonExistingRepo_23() { */ public void testRemoveAndCreateContainer() throws Exception { final String container = getContainerName().concat("-testremove"); - final AzureStorageService storageService = new AzureStorageServiceImpl(internalCluster().getDefaultSettings(), - AzureStorageSettings.load(internalCluster().getDefaultSettings())); - + final AzureStorageService storageService = new AzureStorageServiceImpl(nodeSettings(0),AzureStorageSettings.load(nodeSettings(0))); + // It could happen that we run this test really close to a previous one // so we might need some time to be able to create the container assertBusy(() -> { diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 3a93146c0865b..2f45cb5ab8f21 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -57,6 +57,7 @@ dependencies { compile 'commons-lang:commons-lang:2.6' compile 'javax.servlet:servlet-api:2.5' compile "org.slf4j:slf4j-api:${versions.slf4j}" + compile "org.apache.logging.log4j:log4j-slf4j-impl:${versions.log4j}" hdfsFixture 
project(':test:fixtures:hdfs-fixture') } @@ -470,9 +471,8 @@ thirdPartyAudit.excludes = [ // internal java api: sun.misc.SignalHandler 'org.apache.hadoop.util.SignalLogger$Handler', - // optional dependencies of slf4j-api - 'org.slf4j.impl.StaticMDCBinder', - 'org.slf4j.impl.StaticMarkerBinder', + // we are not pulling in slf4j-ext, this is okay, Log4j will fallback gracefully + 'org.slf4j.ext.EventData', 'org.apache.log4j.AppenderSkeleton', 'org.apache.log4j.AsyncAppender', @@ -493,12 +493,6 @@ thirdPartyAudit.excludes = [ 'com.squareup.okhttp.ResponseBody' ] -// Gradle 2.13 bundles org.slf4j.impl.StaticLoggerBinder in its core.jar which leaks into the forbidden APIs ant task -// Gradle 2.14+ does not bundle this class anymore so we need to properly exclude it here. -if (GradleVersion.current() > GradleVersion.version("2.13")) { - thirdPartyAudit.excludes += ['org.slf4j.impl.StaticLoggerBinder'] -} - if (JavaVersion.current() > JavaVersion.VERSION_1_8) { thirdPartyAudit.excludes += ['javax.xml.bind.annotation.adapters.HexBinaryAdapter'] } diff --git a/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.9.0.jar.sha1 b/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.9.0.jar.sha1 new file mode 100644 index 0000000000000..85d3c6534d700 --- /dev/null +++ b/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.9.0.jar.sha1 @@ -0,0 +1 @@ +1bd7f6b6ddbaf8a21d6c2b288d0cc5bc5b791cc0 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/log4j-slf4j-impl-LICENSE.txt b/plugins/repository-hdfs/licenses/log4j-slf4j-impl-LICENSE.txt new file mode 100644 index 0000000000000..6279e5206de13 --- /dev/null +++ b/plugins/repository-hdfs/licenses/log4j-slf4j-impl-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 1999-2005 The Apache Software Foundation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/repository-hdfs/licenses/log4j-slf4j-impl-NOTICE.txt b/plugins/repository-hdfs/licenses/log4j-slf4j-impl-NOTICE.txt new file mode 100644 index 0000000000000..0375732360047 --- /dev/null +++ b/plugins/repository-hdfs/licenses/log4j-slf4j-impl-NOTICE.txt @@ -0,0 +1,5 @@ +Apache log4j +Copyright 2007 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). 
\ No newline at end of file diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/20_keep_alive.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/20_keep_alive.yml new file mode 100644 index 0000000000000..8577835502a6d --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/20_keep_alive.yml @@ -0,0 +1,70 @@ +--- + teardown: + + - do: + cluster.put_settings: + body: + transient: + search.max_keep_alive: null + search.default_keep_alive: null + +--- +"Max keep alive": + - skip: + version: " - 6.99.99" + reason: search.max_keep_alive was added in 7.0.0 + + - do: + index: + index: test_scroll + type: test + id: 1 + body: { foo: 1 } + + - do: + index: + index: test_scroll + type: test + id: 2 + body: { foo: 1 } + + - do: + indices.refresh: {} + + - do: + cluster.put_settings: + body: + transient: + search.default_keep_alive: "1m" + search.max_keep_alive: "1m" + + - do: + catch: /.*Keep alive for scroll.*is too large.*/ + search: + index: test_scroll + size: 1 + scroll: 2m + sort: foo + body: + query: + match_all: {} + + - do: + search: + index: test_scroll + size: 1 + scroll: 1m + sort: foo + body: + query: + match_all: {} + + - set: {_scroll_id: scroll_id} + - match: {hits.total: 2 } + - length: {hits.hits: 1 } + + - do: + catch: /.*Keep alive for scroll.*is too large.*/ + scroll: + scroll_id: $scroll_id + scroll: 3m diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/20_completion.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/20_completion.yml index 860c43bbcbf0e..3ac9b4ee2ddc4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/20_completion.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/20_completion.yml @@ -291,3 +291,42 @@ setup: - match: { suggest.result.0.options.1._type: "test" } - match: { suggest.result.0.options.1._source.title: "title_bar" } - match: { suggest.result.0.options.1._source.count: 4 } + +--- +"Skip 
duplicates should work": + - skip: + version: " - 6.0.99" + reason: skip_duplicates was added in 6.1 + + - do: + index: + index: test + type: test + id: 1 + body: + suggest_1: "bar" + + - do: + index: + index: test + type: test + id: 2 + body: + suggest_1: "bar" + + - do: + indices.refresh: {} + + - do: + search: + body: + suggest: + result: + text: "b" + completion: + field: suggest_1 + skip_duplicates: true + + - length: { suggest.result: 1 } + - length: { suggest.result.0.options: 1 } + - match: { suggest.result.0.options.0.text: "bar" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/30_context.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/30_context.yml index 778094ec90baf..f0d97382eeb8e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/30_context.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/30_context.yml @@ -276,4 +276,76 @@ setup: - length: { suggest.result: 1 } - length: { suggest.result.0.options: 1 } - - match: { suggest.result.0.options.0.text: "Marriot in Berlin" } + - match: { suggest.result.0.options.0.text: "Marriot in Berlin" } + +--- +"Skip duplicates with contexts should work": + - skip: + version: " - 6.0.99" + reason: skip_duplicates was added in 6.1 + + - do: + index: + index: test + type: test + id: 1 + body: + suggest_context: + input: "foo" + contexts: + color: "red" + + - do: + index: + index: test + type: test + id: 1 + body: + suggest_context: + input: "foo" + contexts: + color: "red" + + - do: + index: + index: test + type: test + id: 2 + body: + suggest_context: + input: "foo" + contexts: + color: "blue" + + - do: + indices.refresh: {} + + - do: + search: + body: + suggest: + result: + text: "foo" + completion: + field: suggest_context + skip_duplicates: true + contexts: + color: "red" + + - length: { suggest.result: 1 } + - length: { suggest.result.0.options: 1 } + - match: { suggest.result.0.options.0.text: "foo" } + + - do: + search: + body: 
+ suggest: + result: + text: "foo" + completion: + skip_duplicates: true + field: suggest_context + + - length: { suggest.result: 1 } + - length: { suggest.result.0.options: 1 } + - match: { suggest.result.0.options.0.text: "foo" } diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index 58208fd7bd87f..71fb310fa2cd5 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -58,7 +58,7 @@ import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.mapper.Uid; -import org.elasticsearch.index.seqno.SequenceNumbersService; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.index.store.DirectoryService; import org.elasticsearch.index.store.Store; @@ -440,7 +440,7 @@ protected final void recoverReplica(final IndexShard replica, if (snapshot.size() > 0) { startingSeqNo = PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget); } else { - startingSeqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO; + startingSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; } final StartRecoveryRequest request = new StartRecoveryRequest(replica.shardId(), targetAllocationId, diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java index 0b0d95f5c7fff..c38b6b759de5b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java @@ -20,7 +20,6 @@ package org.elasticsearch.test; import com.fasterxml.jackson.core.io.JsonStringEncoder; - import 
org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; @@ -413,7 +412,9 @@ static List> alterateQueries(Set queries, Set> alterateQueries(Set queries, Set stream = Files.newDirectoryStream(dataDir)) { - List dirs = new ArrayList<>(); - for (Path p : stream) { - if (!p.getFileName().toString().startsWith("extra")) { - dirs.add(p); - } - } - list = dirs.toArray(new Path[0]); - } - - if (list.length != 1) { - StringBuilder builder = new StringBuilder("Backwards index must contain exactly one cluster\n"); - for (Path line : list) { - builder.append(line.toString()).append('\n'); - } - throw new IllegalStateException(builder.toString()); - } - Path src = list[0].resolve(NodeEnvironment.NODES_FOLDER); - Path dest = dataDir.resolve(NodeEnvironment.NODES_FOLDER); - assertTrue(Files.exists(src)); - Files.move(src, dest); - assertFalse(Files.exists(src)); - assertTrue(Files.exists(dest)); - Settings.Builder builder = Settings.builder() - .put(settings) - .put(Environment.PATH_DATA_SETTING.getKey(), dataDir.toAbsolutePath()); - return builder.build(); - } @Override protected NamedXContentRegistry xContentRegistry() { diff --git a/test/framework/src/main/java/org/elasticsearch/test/discovery/TestZenDiscovery.java b/test/framework/src/main/java/org/elasticsearch/test/discovery/TestZenDiscovery.java index 63212cddc39b1..d224d9c519c8a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/discovery/TestZenDiscovery.java +++ b/test/framework/src/main/java/org/elasticsearch/test/discovery/TestZenDiscovery.java @@ -83,7 +83,7 @@ private TestZenDiscovery(Settings settings, ThreadPool threadPool, TransportServ ClusterApplier clusterApplier, ClusterSettings clusterSettings, UnicastHostsProvider hostsProvider, AllocationService allocationService) { super(settings, threadPool, transportService, namedWriteableRegistry, masterService, clusterApplier, clusterSettings, - hostsProvider, allocationService); + 
hostsProvider, allocationService, Collections.emptyList()); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java index 72fb5221ed728..ff6efa3830023 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java +++ b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java @@ -141,7 +141,7 @@ public ReproduceErrorMessageBuilder appendESProperties() { appendProperties(ESIntegTestCase.TESTS_ENABLE_MOCK_MODULES); } appendProperties("tests.assertion.disabled", "tests.security.manager", "tests.nightly", "tests.jvms", - "tests.client.ratio", "tests.heap.size", "tests.bwc", "tests.bwc.version"); + "tests.client.ratio", "tests.heap.size", "tests.bwc", "tests.bwc.version", "build.snapshot"); if (System.getProperty("tests.jvm.argline") != null && !System.getProperty("tests.jvm.argline").isEmpty()) { appendOpt("tests.jvm.argline", "\"" + System.getProperty("tests.jvm.argline") + "\""); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java index 2ccddf6bc5437..81fc934ca6d7e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java @@ -39,7 +39,7 @@ import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportServiceAdapter; +import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportStats; import java.io.IOException; @@ -60,7 +60,7 @@ /** A transport class that doesn't send anything but 
rather captures all requests for inspection from tests */ public class CapturingTransport implements Transport { - private TransportServiceAdapter adapter; + private TransportService transportService; public static class CapturedRequest { public final DiscoveryNode node; @@ -137,7 +137,7 @@ public void clear() { /** simulate a response for the given requestId */ public void handleResponse(final long requestId, final TransportResponse response) { - adapter.onResponseReceived(requestId).handleResponse(response); + transportService.onResponseReceived(requestId).handleResponse(response); } /** @@ -189,7 +189,7 @@ public void handleRemoteError(final long requestId, final Throwable t) { * @param e the failure */ public void handleError(final long requestId, final TransportException e) { - adapter.onResponseReceived(requestId).handleException(e); + transportService.onResponseReceived(requestId).handleException(e); } @Override @@ -220,8 +220,8 @@ public TransportStats getStats() { } @Override - public void transportServiceAdapter(TransportServiceAdapter adapter) { - this.adapter = adapter; + public void setTransportService(TransportService transportService) { + this.transportService = transportService; } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index 503a7ae1f79d9..0979cfbfea2b7 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -44,7 +44,6 @@ import org.elasticsearch.node.Node; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.tasks.TaskManager; -import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.tasks.MockTaskManager; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ConnectTransportException; @@ 
-58,7 +57,6 @@ import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.transport.TransportServiceAdapter; import org.elasticsearch.transport.TransportStats; import java.io.IOException; @@ -101,16 +99,16 @@ public List> getSettings() { } public static MockTransportService createNewService(Settings settings, Version version, ThreadPool threadPool, - @Nullable ClusterSettings clusterSettings) { + @Nullable ClusterSettings clusterSettings) { // some tests use MockTransportService to do network based testing. Yet, we run tests in multiple JVMs that means // concurrent tests could claim port that another JVM just released and if that test tries to simulate a disconnect it might // be smart enough to re-connect depending on what is tested. To reduce the risk, since this is very hard to debug we use // a different default port range per JVM unless the incoming settings override it int basePort = 10300 + (JVM_ORDINAL * 100); // use a non-default port otherwise some cluster in this JVM might reuse a port - settings = Settings.builder().put(TcpTransport.PORT.getKey(), basePort + "-" + (basePort+100)).put(settings).build(); + settings = Settings.builder().put(TcpTransport.PORT.getKey(), basePort + "-" + (basePort + 100)).put(settings).build(); NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(ClusterModule.getNamedWriteables()); final Transport transport = new MockTcpTransport(settings, threadPool, BigArrays.NON_RECYCLING_INSTANCE, - new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(Collections.emptyList()), version); + new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(Collections.emptyList()), version); return createNewService(settings, transport, version, threadPool, clusterSettings); } @@ -118,8 +116,8 @@ public static MockTransportService createNewService(Settings 
settings, Transport @Nullable ClusterSettings clusterSettings) { return new MockTransportService(settings, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> - new DiscoveryNode(Node.NODE_NAME_SETTING.get(settings), UUIDs.randomBase64UUID(), boundAddress.publishAddress(), - Node.NODE_ATTRIBUTES.get(settings).getAsMap(), DiscoveryNode.getRolesFromSettings(settings), version), + new DiscoveryNode(Node.NODE_NAME_SETTING.get(settings), UUIDs.randomBase64UUID(), boundAddress.publishAddress(), + Node.NODE_ATTRIBUTES.get(settings).getAsMap(), DiscoveryNode.getRolesFromSettings(settings), version), clusterSettings); } @@ -129,10 +127,10 @@ public static MockTransportService createNewService(Settings settings, Transport * Build the service. * * @param clusterSettings if non null the the {@linkplain TransportService} will register with the {@link ClusterSettings} for settings - * updates for {@link #TRACE_LOG_EXCLUDE_SETTING} and {@link #TRACE_LOG_INCLUDE_SETTING}. + * updates for {@link #TRACE_LOG_EXCLUDE_SETTING} and {@link #TRACE_LOG_INCLUDE_SETTING}. */ public MockTransportService(Settings settings, Transport transport, ThreadPool threadPool, TransportInterceptor interceptor, - @Nullable ClusterSettings clusterSettings) { + @Nullable ClusterSettings clusterSettings) { this(settings, transport, threadPool, interceptor, (boundAddress) -> DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), settings.get(Node.NODE_NAME_SETTING.getKey(), UUIDs.randomBase64UUID())), clusterSettings); @@ -142,7 +140,7 @@ public MockTransportService(Settings settings, Transport transport, ThreadPool t * Build the service. * * @param clusterSettings if non null the the {@linkplain TransportService} will register with the {@link ClusterSettings} for settings - * updates for {@link #TRACE_LOG_EXCLUDE_SETTING} and {@link #TRACE_LOG_INCLUDE_SETTING}. + * updates for {@link #TRACE_LOG_EXCLUDE_SETTING} and {@link #TRACE_LOG_INCLUDE_SETTING}. 
*/ public MockTransportService(Settings settings, Transport transport, ThreadPool threadPool, TransportInterceptor interceptor, Function localNodeFactory, @@ -163,7 +161,7 @@ public static TransportAddress[] extractTransportAddresses(TransportService tran protected TaskManager createTaskManager() { if (MockTaskManager.USE_MOCK_TASK_MANAGER_SETTING.get(settings)) { return new MockTaskManager(settings); - } else { + } else { return super.createTaskManager(); } } @@ -547,8 +545,8 @@ public DelegateTransport(Transport transport) { } @Override - public void transportServiceAdapter(TransportServiceAdapter service) { - transport.transportServiceAdapter(service); + public void setTransportService(TransportService service) { + transport.setTransportService(service); } @Override @@ -641,7 +639,9 @@ public void stop() { } @Override - public void close() { transport.close(); } + public void close() { + transport.close(); + } @Override public Map profileBoundAddresses() { @@ -704,55 +704,47 @@ public void clearTracers() { } @Override - protected Adapter createAdapter() { - return new MockAdapter(); + protected boolean traceEnabled() { + return super.traceEnabled() || activeTracers.isEmpty() == false; } - class MockAdapter extends Adapter { - - @Override - protected boolean traceEnabled() { - return super.traceEnabled() || activeTracers.isEmpty() == false; - } - - @Override - protected void traceReceivedRequest(long requestId, String action) { - super.traceReceivedRequest(requestId, action); - for (Tracer tracer : activeTracers) { - tracer.receivedRequest(requestId, action); - } + @Override + protected void traceReceivedRequest(long requestId, String action) { + super.traceReceivedRequest(requestId, action); + for (Tracer tracer : activeTracers) { + tracer.receivedRequest(requestId, action); } + } - @Override - protected void traceResponseSent(long requestId, String action) { - super.traceResponseSent(requestId, action); - for (Tracer tracer : activeTracers) { - 
tracer.responseSent(requestId, action); - } + @Override + protected void traceResponseSent(long requestId, String action) { + super.traceResponseSent(requestId, action); + for (Tracer tracer : activeTracers) { + tracer.responseSent(requestId, action); } + } - @Override - protected void traceResponseSent(long requestId, String action, Exception e) { - super.traceResponseSent(requestId, action, e); - for (Tracer tracer : activeTracers) { - tracer.responseSent(requestId, action, e); - } + @Override + protected void traceResponseSent(long requestId, String action, Exception e) { + super.traceResponseSent(requestId, action, e); + for (Tracer tracer : activeTracers) { + tracer.responseSent(requestId, action, e); } + } - @Override - protected void traceReceivedResponse(long requestId, DiscoveryNode sourceNode, String action) { - super.traceReceivedResponse(requestId, sourceNode, action); - for (Tracer tracer : activeTracers) { - tracer.receivedResponse(requestId, sourceNode, action); - } + @Override + protected void traceReceivedResponse(long requestId, DiscoveryNode sourceNode, String action) { + super.traceReceivedResponse(requestId, sourceNode, action); + for (Tracer tracer : activeTracers) { + tracer.receivedResponse(requestId, sourceNode, action); } + } - @Override - protected void traceRequestSent(DiscoveryNode node, long requestId, String action, TransportRequestOptions options) { - super.traceRequestSent(node, requestId, action, options); - for (Tracer tracer : activeTracers) { - tracer.requestSent(node, requestId, action, options); - } + @Override + protected void traceRequestSent(DiscoveryNode node, long requestId, String action, TransportRequestOptions options) { + super.traceRequestSent(node, requestId, action, options); + for (Tracer tracer : activeTracers) { + tracer.requestSent(node, requestId, action, options); } } @@ -802,6 +794,7 @@ public Transport getOriginalTransport() { public Transport.Connection openConnection(DiscoveryNode node, ConnectionProfile 
profile) throws IOException { FilteredConnection filteredConnection = new FilteredConnection(super.openConnection(node, profile)) { final AtomicBoolean closed = new AtomicBoolean(false); + @Override public void close() throws IOException { try { diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 2e252d112df2b..da43f116d4245 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -1047,9 +1047,11 @@ private static class Tracer extends MockTransportService.Tracer { public volatile boolean sawResponseReceived; public AtomicReference expectedEvents = new AtomicReference<>(); + Tracer(Set actions) { this.actions = actions; } + @Override public void receivedRequest(long requestId, String action) { super.receivedRequest(requestId, action); @@ -1446,7 +1448,7 @@ public void handleResponse(StringMessageResponse response) { public void handleException(TransportException exp) { Throwable cause = ExceptionsHelper.unwrapCause(exp); assertThat(cause, instanceOf(ConnectTransportException.class)); - assertThat(((ConnectTransportException)cause).node(), equalTo(nodeA)); + assertThat(((ConnectTransportException) cause).node(), equalTo(nodeA)); } }); @@ -1456,7 +1458,7 @@ public void handleException(TransportException exp) { } catch (Exception e) { Throwable cause = ExceptionsHelper.unwrapCause(e); assertThat(cause, instanceOf(ConnectTransportException.class)); - assertThat(((ConnectTransportException)cause).node(), equalTo(nodeA)); + assertThat(((ConnectTransportException) cause).node(), equalTo(nodeA)); } // wait for the transport to process the sending failure and disconnect from node @@ -1586,26 +1588,26 @@ public void testBlockingIncomingRequests() throws Exception { 
CountDownLatch latch = new CountDownLatch(1); serviceA.sendRequest(connection, "action", new TestRequest(), TransportRequestOptions.EMPTY, new TransportResponseHandler() { - @Override - public TestResponse newInstance() { - return new TestResponse(); - } + @Override + public TestResponse newInstance() { + return new TestResponse(); + } - @Override - public void handleResponse(TestResponse response) { - latch.countDown(); - } + @Override + public void handleResponse(TestResponse response) { + latch.countDown(); + } - @Override - public void handleException(TransportException exp) { - latch.countDown(); - } + @Override + public void handleException(TransportException exp) { + latch.countDown(); + } - @Override - public String executor() { - return ThreadPool.Names.SAME; - } - }); + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + }); assertFalse(requestProcessed.get()); @@ -1859,14 +1861,20 @@ public String executor() { public void testRegisterHandlerTwice() { serviceB.registerRequestHandler("action1", TestRequest::new, randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC), - (request, message) -> {throw new AssertionError("boom");}); + (request, message) -> { + throw new AssertionError("boom"); + }); expectThrows(IllegalArgumentException.class, () -> serviceB.registerRequestHandler("action1", TestRequest::new, randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC), - (request, message) -> {throw new AssertionError("boom");}) + (request, message) -> { + throw new AssertionError("boom"); + }) ); serviceA.registerRequestHandler("action1", TestRequest::new, randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC), - (request, message) -> {throw new AssertionError("boom");}); + (request, message) -> { + throw new AssertionError("boom"); + }); } public void testTimeoutPerConnection() throws IOException { @@ -1914,11 +1922,12 @@ public void testTimeoutPerConnection() throws IOException { public void 
testHandshakeWithIncompatVersion() { assumeTrue("only tcp transport has a handshake method", serviceA.getOriginalTransport() instanceof TcpTransport); NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); + Version version = Version.fromString("2.0.0"); try (MockTcpTransport transport = new MockTcpTransport(Settings.EMPTY, threadPool, BigArrays.NON_RECYCLING_INSTANCE, - new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(Collections.emptyList()), - Version.fromString("2.0.0"))) { - transport.transportServiceAdapter(serviceA.new Adapter()); - transport.start(); + new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(Collections.emptyList()), version); + MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, transport, version, threadPool, null)) { + service.start(); + service.acceptIncomingRequests(); DiscoveryNode node = new DiscoveryNode("TS_TPC", "TS_TPC", transport.boundAddress().publishAddress(), emptyMap(), emptySet(), version0); ConnectionProfile.Builder builder = new ConnectionProfile.Builder(); @@ -1937,9 +1946,10 @@ public void testHandshakeUpdatesVersion() throws IOException { NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); Version version = VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.CURRENT); try (MockTcpTransport transport = new MockTcpTransport(Settings.EMPTY, threadPool, BigArrays.NON_RECYCLING_INSTANCE, - new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(Collections.emptyList()),version)) { - transport.transportServiceAdapter(serviceA.new Adapter()); - transport.start(); + new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(Collections.emptyList()), version); + MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, transport, version, 
threadPool, null)) { + service.start(); + service.acceptIncomingRequests(); DiscoveryNode node = new DiscoveryNode("TS_TPC", "TS_TPC", transport.boundAddress().publishAddress(), emptyMap(), emptySet(), Version.fromString("2.0.0")); @@ -1956,24 +1966,26 @@ public void testHandshakeUpdatesVersion() throws IOException { } } - public void testTcpHandshake() throws IOException, InterruptedException { assumeTrue("only tcp transport has a handshake method", serviceA.getOriginalTransport() instanceof TcpTransport); TcpTransport originalTransport = (TcpTransport) serviceA.getOriginalTransport(); NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); - try (MockTcpTransport transport = new MockTcpTransport(Settings.EMPTY, threadPool, BigArrays.NON_RECYCLING_INSTANCE, + MockTcpTransport transport = new MockTcpTransport(Settings.EMPTY, threadPool, BigArrays.NON_RECYCLING_INSTANCE, new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(Collections.emptyList())) { @Override protected String handleRequest(MockChannel mockChannel, String profileName, StreamInput stream, long requestId, int messageLengthBytes, Version version, InetSocketAddress remoteAddress, byte status) throws IOException { return super.handleRequest(mockChannel, profileName, stream, requestId, messageLengthBytes, version, remoteAddress, - (byte)(status & ~(1<<3))); // we flip the isHandshake bit back and act like the handler is not found + (byte) (status & ~(1 << 3))); // we flip the isHandshake bit back and act like the handler is not found } - }) { - transport.transportServiceAdapter(serviceA.new Adapter()); - transport.start(); + }; + + try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, transport, Version.CURRENT, threadPool, + null)) { + service.start(); + service.acceptIncomingRequests(); // this acts like a node that doesn't have support for handshakes DiscoveryNode node = new DiscoveryNode("TS_TPC", 
"TS_TPC", transport.boundAddress().publishAddress(), emptyMap(), emptySet(), version0); @@ -1986,7 +1998,7 @@ protected String handleRequest(MockChannel mockChannel, String profileName, Stre TcpTransport.NodeChannels connection = originalTransport.openConnection( new DiscoveryNode("TS_TPC", "TS_TPC", service.boundAddress().publishAddress(), emptyMap(), emptySet(), version0), null - ) ) { + )) { Version version = originalTransport.executeHandshake(connection.getNode(), connection.channel(TransportRequestOptions.Type.PING), TimeValue.timeValueSeconds(10)); assertEquals(version, Version.CURRENT); @@ -2105,8 +2117,8 @@ public String executor() { } }; - serviceB.sendRequest(nodeA, "action", new TestRequest(randomFrom("fail", "pass")), transportResponseHandler); - serviceA.sendRequest(nodeA, "action", new TestRequest(randomFrom("fail", "pass")), transportResponseHandler); + serviceB.sendRequest(nodeA, "action", new TestRequest(randomFrom("fail", "pass")), transportResponseHandler); + serviceA.sendRequest(nodeA, "action", new TestRequest(randomFrom("fail", "pass")), transportResponseHandler); latch.await(); } @@ -2303,22 +2315,22 @@ public String executor() { TransportRequestOptions.Type.STATE); try (Transport.Connection connection = serviceC.openConnection(serviceB.getLocalNode(), builder.build())) { assertBusy(() -> { // netty for instance invokes this concurrently so we better use assert busy here - TransportStats transportStats = serviceC.transport.getStats(); // we did a single round-trip to do the initial handshake - assertEquals(1, transportStats.getRxCount()); - assertEquals(1, transportStats.getTxCount()); - assertEquals(25, transportStats.getRxSize().getBytes()); - assertEquals(45, transportStats.getTxSize().getBytes()); - }); + TransportStats transportStats = serviceC.transport.getStats(); // we did a single round-trip to do the initial handshake + assertEquals(1, transportStats.getRxCount()); + assertEquals(1, transportStats.getTxCount()); + assertEquals(25, 
transportStats.getRxSize().getBytes()); + assertEquals(45, transportStats.getTxSize().getBytes()); + }); serviceC.sendRequest(connection, "action", new TestRequest("hello world"), TransportRequestOptions.EMPTY, transportResponseHandler); receivedLatch.await(); assertBusy(() -> { // netty for instance invokes this concurrently so we better use assert busy here - TransportStats transportStats = serviceC.transport.getStats(); // request has ben send - assertEquals(1, transportStats.getRxCount()); - assertEquals(2, transportStats.getTxCount()); - assertEquals(25, transportStats.getRxSize().getBytes()); - assertEquals(91, transportStats.getTxSize().getBytes()); - }); + TransportStats transportStats = serviceC.transport.getStats(); // request has ben send + assertEquals(1, transportStats.getRxCount()); + assertEquals(2, transportStats.getTxCount()); + assertEquals(25, transportStats.getRxSize().getBytes()); + assertEquals(91, transportStats.getTxSize().getBytes()); + }); sendResponseLatch.countDown(); responseLatch.await(); stats = serviceC.transport.getStats(); // response has been received @@ -2398,22 +2410,22 @@ public String executor() { TransportRequestOptions.Type.STATE); try (Transport.Connection connection = serviceC.openConnection(serviceB.getLocalNode(), builder.build())) { assertBusy(() -> { // netty for instance invokes this concurrently so we better use assert busy here - TransportStats transportStats = serviceC.transport.getStats(); // request has ben send - assertEquals(1, transportStats.getRxCount()); - assertEquals(1, transportStats.getTxCount()); - assertEquals(25, transportStats.getRxSize().getBytes()); - assertEquals(45, transportStats.getTxSize().getBytes()); - }); + TransportStats transportStats = serviceC.transport.getStats(); // request has ben send + assertEquals(1, transportStats.getRxCount()); + assertEquals(1, transportStats.getTxCount()); + assertEquals(25, transportStats.getRxSize().getBytes()); + assertEquals(45, 
transportStats.getTxSize().getBytes()); + }); serviceC.sendRequest(connection, "action", new TestRequest("hello world"), TransportRequestOptions.EMPTY, transportResponseHandler); receivedLatch.await(); assertBusy(() -> { // netty for instance invokes this concurrently so we better use assert busy here - TransportStats transportStats = serviceC.transport.getStats(); // request has ben send - assertEquals(1, transportStats.getRxCount()); - assertEquals(2, transportStats.getTxCount()); - assertEquals(25, transportStats.getRxSize().getBytes()); - assertEquals(91, transportStats.getTxSize().getBytes()); - }); + TransportStats transportStats = serviceC.transport.getStats(); // request has ben send + assertEquals(1, transportStats.getRxCount()); + assertEquals(2, transportStats.getTxCount()); + assertEquals(25, transportStats.getRxSize().getBytes()); + assertEquals(91, transportStats.getTxSize().getBytes()); + }); sendResponseLatch.countDown(); responseLatch.await(); stats = serviceC.transport.getStats(); // exception response has been received