From e8efd85808fa071e233904db6b3c3a2bc5dadbb3 Mon Sep 17 00:00:00 2001
From: PnPie
Date: Wed, 13 Dec 2017 23:14:22 +0100
Subject: [PATCH 01/12] Add Refresh API for RestHighLevelClient

---
 .../elasticsearch/client/IndicesClient.java   | 23 ++++++++++
 .../org/elasticsearch/client/Request.java     |  6 +++
 .../client/RestHighLevelClient.java           |  2 -
 .../elasticsearch/client/IndicesClientIT.java | 38 +++++++++++++---
 .../indices/refresh/RefreshResponse.java      | 43 ++++++++++++++++++-
 5 files changed, 103 insertions(+), 9 deletions(-)

diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java
index 57dafbba50994..3e5e00a224d9c 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java
@@ -27,6 +27,8 @@
 import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
 import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
 import org.elasticsearch.action.admin.indices.open.OpenIndexResponse;
+import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
 
 import java.io.IOException;
 import java.util.Collections;
@@ -111,4 +113,25 @@ public void openIndexAsync(OpenIndexRequest openIndexRequest, ActionListener<OpenIndexResponse> listener, Header... headers) {
+
+    /**
+     * Refresh one or more indices using the Refresh API
+     * <p>
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-refresh.html">
+     * Refresh API on elastic.co</a>
+     */
+    public final RefreshResponse refresh(RefreshRequest refreshRequest, Header... headers) throws IOException {
+        return restHighLevelClient.performRequestAndParseEntity(refreshRequest, Request::refresh, RefreshResponse::fromXContent,
+                Collections.emptySet(), headers);
+    }
+
+    /**
+     * Asynchronously refresh one or more indices using the Refresh API
+     * <p>
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-refresh.html">
+     * Refresh API on elastic.co</a>
+     */
+    public final void refreshAsync(RefreshRequest refreshRequest, ActionListener<RefreshResponse> listener, Header... headers) {
+        restHighLevelClient.performRequestAsyncAndParseEntity(refreshRequest, Request::refresh, RefreshResponse::fromXContent,
+                listener, Collections.emptySet(), headers);
+    }
 }
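To make the new surface concrete, here is a minimal usage sketch of the two methods added above. It is not part of the patch: the `client` parameter is an already-constructed `RestHighLevelClient`, the index name `posts` is invented, and error handling is elided.

```java
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
import org.elasticsearch.client.RestHighLevelClient;

import java.io.IOException;

public class RefreshUsageSketch {

    // Blocking variant: returns once the refresh has run on all shard copies.
    static void refreshBlocking(RestHighLevelClient client) throws IOException {
        RefreshRequest request = new RefreshRequest("posts"); // "posts" is a made-up index
        RefreshResponse response = client.indices().refresh(request);
        System.out.println("refreshed " + response.getSuccessfulShards()
                + "/" + response.getTotalShards() + " shard copies");
    }

    // Non-blocking variant: the listener fires when the response or a failure arrives.
    static void refreshNonBlocking(RestHighLevelClient client) {
        client.indices().refreshAsync(new RefreshRequest("posts"), new ActionListener<RefreshResponse>() {
            @Override
            public void onResponse(RefreshResponse response) {
                // all requested shard copies were refreshed
            }

            @Override
            public void onFailure(Exception e) {
                // a missing index surfaces here, e.g. as an ElasticsearchException with a 404 status
            }
        });
    }
}
```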
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java
index dd08179cf6297..9525bb52515f8 100755
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java
@@ -32,6 +32,7 @@
 import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
 import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
 import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
+import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
 import org.elasticsearch.action.bulk.BulkRequest;
 import org.elasticsearch.action.delete.DeleteRequest;
 import org.elasticsearch.action.get.GetRequest;
@@ -152,6 +153,11 @@ static Request openIndex(OpenIndexRequest openIndexRequest) {
         return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), null);
     }
 
+    static Request refresh(RefreshRequest refreshRequest) {
+        String endpoint = endpoint(refreshRequest.indices(), Strings.EMPTY_ARRAY, "_refresh");
+        return new Request(HttpPost.METHOD_NAME, endpoint, Collections.emptyMap(), null);
+    }
+
     static Request createIndex(CreateIndexRequest createIndexRequest) throws IOException {
         String endpoint = endpoint(createIndexRequest.indices(), Strings.EMPTY_ARRAY, "");
 
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java
index ca244eee88c62..5f330256f6846 100755
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java
@@ -26,8 +26,6 @@
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestValidationException;
-import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
-import org.elasticsearch.action.admin.indices.open.OpenIndexResponse;
 import org.elasticsearch.action.bulk.BulkRequest;
 import org.elasticsearch.action.bulk.BulkResponse;
 import org.elasticsearch.action.delete.DeleteRequest;
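Since `Request.refresh` above builds a plain POST with no query parameters and no body, the same call can be issued directly with the low-level client. A hedged sketch follows: the index names are examples and `restClient` is assumed to exist already.

```java
import java.io.IOException;

import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class LowLevelRefreshSketch {

    // Issues the same wire request that Request.refresh produces:
    // POST /index1,index2/_refresh
    static int refreshStatus(RestClient restClient) throws IOException {
        Response response = restClient.performRequest("POST", "/index1,index2/_refresh");
        return response.getStatusLine().getStatusCode(); // 200 when the refresh succeeded
    }
}
```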
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java
index 5f356c4c29f5e..21e2e1b0896fd 100755
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java
@@ -27,22 +27,22 @@
 import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
 import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
 import org.elasticsearch.action.admin.indices.open.OpenIndexResponse;
+import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
 import org.elasticsearch.action.support.IndicesOptions;
-import org.elasticsearch.rest.RestStatus;
-
-import java.io.IOException;
-import java.util.Locale;
-
-import static org.hamcrest.Matchers.equalTo;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.rest.RestStatus;
 
+import java.io.IOException;
+import java.util.Locale;
 import java.util.Map;
 
 import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.hamcrest.Matchers.equalTo;
 
 public class IndicesClientIT extends ESRestHighLevelClientTestCase {
 
@@ -180,6 +180,32 @@ public void testOpenNonExistentIndex() throws IOException {
         assertEquals(RestStatus.NOT_FOUND, strictException.status());
     }
 
+    public void testRefresh() throws IOException {
+        {
+            String[] indices = randomIndices(1, 5);
+            for (String index : indices) {
+                createIndex(index);
+            }
+            RefreshRequest refreshRequest = new RefreshRequest(indices);
+            RefreshResponse refreshResponse =
+                    execute(refreshRequest, highLevelClient().indices()::refresh, highLevelClient().indices()::refreshAsync);
+            // 10 shard copies per index by default: 5 primaries, each with 1 replica
+            assertThat(refreshResponse.getTotalShards(), equalTo(indices.length * 10));
+        }
+        {
+            String[] nonExistentIndices = randomIndices(1, 5);
+            for (String nonExistentIndex : nonExistentIndices) {
+                if (indexExists(nonExistentIndex)) {
+                    return;
+                }
+            }
+            RefreshRequest refreshRequest = new RefreshRequest(nonExistentIndices);
+            ElasticsearchException exception = expectThrows(ElasticsearchException.class,
+                    () -> execute(refreshRequest, highLevelClient().indices()::refresh, highLevelClient().indices()::refreshAsync));
+            assertEquals(RestStatus.NOT_FOUND, exception.status());
+        }
+    }
+
     private static String[] randomIndices(int minIndicesNum, int maxIndicesNum) {
         int numIndices = randomIntBetween(minIndicesNum, maxIndicesNum);
         String[] indices = new String[numIndices];
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java
index ba3ec31c6a544..2df2dc1c3e575 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java
@@ -21,13 +21,33 @@
 
 import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.BroadcastResponse;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ToXContentFragment;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
 
+import java.io.IOException;
 import java.util.List;
 
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
+
 /**
  * The response of a refresh action.
  */
-public class RefreshResponse extends BroadcastResponse {
+public class RefreshResponse extends BroadcastResponse implements ToXContentFragment {
+
+    private static final ConstructingObjectParser<RefreshResponse, Void> PARSER = new ConstructingObjectParser<>("refresh",
+            true, arg -> (RefreshResponse) arg[0]);
+
+    static {
+        ConstructingObjectParser<RefreshResponse, Void> shardsParser = new ConstructingObjectParser<>("_shards", true,
+                arg -> new RefreshResponse((int) arg[0], (int) arg[1], (int) arg[2], null));
+        shardsParser.declareInt(constructorArg(), new ParseField(Fields.TOTAL));
+        shardsParser.declareInt(constructorArg(), new ParseField(Fields.SUCCESSFUL));
+        shardsParser.declareInt(constructorArg(), new ParseField(Fields.FAILED));
+        PARSER.declareObject(constructorArg(), shardsParser, new ParseField(Fields._SHARDS));
+    }
 
     RefreshResponse() {
     }
@@ -35,4 +55,25 @@ public class RefreshResponse extends BroadcastResponse {
     RefreshResponse(int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
         super(totalShards, successfulShards, failedShards, shardFailures);
     }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject(Fields._SHARDS);
+        builder.field(Fields.TOTAL, getTotalShards());
+        builder.field(Fields.SUCCESSFUL, getSuccessfulShards());
+        builder.field(Fields.FAILED, getFailedShards());
+        builder.endObject();
+        return builder;
+    }
+
+    public static RefreshResponse fromXContent(XContentParser parser) {
+        return PARSER.apply(parser, null);
+    }
+
+    static final class Fields {
+        static final String _SHARDS = "_shards";
+        static final String TOTAL = "total";
+        static final String SUCCESSFUL = "successful";
+        static final String FAILED = "failed";
+    }
 }

From 77da3d5d67ac5cafbda5b985d3e94d8f306e8078 Mon Sep 17 00:00:00 2001
From: PnPie
Date: Tue, 16 Jan 2018 22:38:58 +0100
Subject: [PATCH 02/12] changes

---
 .../indices/refresh/RefreshResponse.java      | 19 ---------------
 .../support/broadcast/BroadcastResponse.java  | 23 +++++++++++++++++--
 2 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java
index 2df2dc1c3e575..114d1800982d8 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java
@@ -24,10 +24,8 @@
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.xcontent.ConstructingObjectParser;
 import org.elasticsearch.common.xcontent.ToXContentFragment;
-import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 
-import java.io.IOException;
 import java.util.List;
 
 import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
@@ -56,24 +54,7 @@ public class RefreshResponse extends BroadcastResponse implements ToXContentFragment {
         super(totalShards, successfulShards, failedShards, shardFailures);
     }
 
-    @Override
-    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        builder.startObject(Fields._SHARDS);
-        builder.field(Fields.TOTAL, getTotalShards());
-        builder.field(Fields.SUCCESSFUL, getSuccessfulShards());
-        builder.field(Fields.FAILED, getFailedShards());
-        builder.endObject();
-        return builder;
-    }
-
     public static RefreshResponse fromXContent(XContentParser parser) {
         return PARSER.apply(parser, null);
     }
-
-    static final class Fields {
-        static final String _SHARDS = "_shards";
-        static final String TOTAL = "total";
-        static final String SUCCESSFUL = "successful";
-        static final String FAILED = "failed";
-    }
 }

diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java
index e608e8e0ab7d6..746780c765722 100644
--- a/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java
@@ -23,8 +23,10 @@
 import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.common.xcontent.ToXContentFragment;
+import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.index.shard.ShardNotFoundException;
+import org.elasticsearch.rest.RestStatus;
 
 import java.io.IOException;
 import java.util.List;
@@ -34,7 +36,7 @@
 /**
  * Base class for all broadcast operation based responses.
  */
-public class BroadcastResponse extends ActionResponse {
+public class BroadcastResponse extends ActionResponse implements ToXContentFragment {
     private static final ShardOperationFailedException[] EMPTY = new ShardOperationFailedException[0];
     private int totalShards;
     private int successfulShards;
@@ -127,4 +129,21 @@ public void writeTo(StreamOutput out) throws IOException {
             exp.writeTo(out);
         }
     }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject(Fields._SHARDS);
+        builder.field(Fields.TOTAL, getTotalShards());
+        builder.field(Fields.SUCCESSFUL, getSuccessfulShards());
+        builder.field(Fields.FAILED, getFailedShards());
+        builder.endObject();
+        return builder;
+    }
+
+    public static final class Fields {
+        public static final String _SHARDS = "_shards";
+        public static final String TOTAL = "total";
+        public static final String SUCCESSFUL = "successful";
+        public static final String FAILED = "failed";
+    }
 }
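The parser registered above consumes exactly the `_shards` object that `BroadcastResponse.toXContent` emits after patch 02, so the two sides round-trip. A hedged sketch of that round trip: the JSON values are invented, and the `createParser` overload shown is the one available in this era of the codebase (its signature has changed across versions).

```java
import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;

public class RefreshResponseParsingSketch {
    public static void main(String[] args) throws Exception {
        // Sample body in the shape the Refresh API returns; the numbers are made up.
        String json = "{\"_shards\":{\"total\":10,\"successful\":9,\"failed\":1}}";
        try (XContentParser parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, json)) {
            RefreshResponse response = RefreshResponse.fromXContent(parser);
            assert response.getTotalShards() == 10;
            assert response.getSuccessfulShards() == 9;
            assert response.getFailedShards() == 1;
        }
    }
}
```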
From 1753a337bffea4f7fbbd9bc472abbc7780079f11 Mon Sep 17 00:00:00 2001
From: PnPie
Date: Sat, 27 Jan 2018 11:14:03 +0100
Subject: [PATCH 03/12] update

---
 CONTRIBUTING.md                               |  12 +-
 .../elasticsearch/gradle/BuildPlugin.groovy   | 126 +-
 .../plugin/MetaPluginBuildPlugin.groovy       |  99 ++
 .../MetaPluginPropertiesExtension.groovy      |  46 +
 .../plugin/MetaPluginPropertiesTask.groovy    |  68 ++
 .../gradle/plugin/PluginBuildPlugin.groovy    |   6 +-
 .../gradle/precommit/JarHellTask.groovy       |   2 +-
 .../gradle/precommit/LoggerUsageTask.groovy   |   2 +-
 .../precommit/NamingConventionsTask.groovy    |   2 +-
 .../gradle/test/ClusterFormationTasks.groovy  |  26 +-
 .../elasticsearch/gradle/test/NodeInfo.groovy |   2 +-
 .../elasticsearch.es-meta-plugin.properties   |  20 +
 .../elasticsearch/client/IndicesClient.java   |  76 +-
 .../org/elasticsearch/client/Request.java     |  52 +-
 .../client/RestHighLevelClient.java           |  21 +
 .../java/org/elasticsearch/client/CrudIT.java |  60 +
 .../elasticsearch/client/IndicesClientIT.java | 156 ++-
 .../elasticsearch/client/RequestTests.java    | 223 ++--
 .../IndicesClientDocumentationIT.java         | 248 +++-
 distribution/build.gradle                     |   2 +-
 distribution/bwc/build.gradle                 |  23 +-
 .../plugins/InstallPluginCommand.java         |  41 +-
 .../high-level/apis/close_index.asciidoc      |  70 ++
 .../high-level/apis/createindex.asciidoc      |   8 +-
docs/java-rest/high-level/apis/index.asciidoc | 15 + .../high-level/apis/open_index.asciidoc | 81 ++ .../high-level/apis/putmapping.asciidoc | 71 ++ .../high-level/supported-apis.asciidoc | 3 + .../painless-getting-started.asciidoc | 2 +- .../bucket/composite-aggregation.asciidoc | 36 +- .../metrics/tophits-aggregation.asciidoc | 2 +- .../bucket-selector-aggregation.asciidoc | 2 +- .../analysis/analyzers/lang-analyzer.asciidoc | 6 +- .../reference/index-modules/translog.asciidoc | 82 +- docs/reference/mapping/types/numeric.asciidoc | 6 +- docs/reference/mapping/types/range.asciidoc | 71 +- .../migration/migrate_7_0/mappings.asciidoc | 6 +- docs/reference/modules/gateway.asciidoc | 9 + .../query-dsl/query-string-syntax.asciidoc | 2 +- docs/reference/query-dsl/term-query.asciidoc | 2 + .../search/request/highlighting.asciidoc | 2 +- .../reference/search/request/rescore.asciidoc | 3 +- libs/build.gradle | 0 .../src/main/eclipse-build.gradle | 2 + .../src/test/eclipse-build.gradle | 6 + .../elasticsearch/nio/AbstractNioChannel.java | 34 +- .../nio/AcceptorEventHandler.java | 2 +- .../nio/BytesChannelContext.java | 166 +++ .../elasticsearch/nio/BytesReadContext.java | 64 - .../elasticsearch/nio/BytesWriteContext.java | 111 -- .../nio/BytesWriteOperation.java | 88 ++ .../org/elasticsearch/nio/ChannelContext.java | 45 + .../org/elasticsearch/nio/ChannelFactory.java | 4 +- .../org/elasticsearch/nio/ESSelector.java | 5 + .../org/elasticsearch/nio/EventHandler.java | 2 +- .../nio/InboundChannelBuffer.java | 4 + .../org/elasticsearch/nio/NioChannel.java | 2 + .../nio/NioServerSocketChannel.java | 24 +- .../elasticsearch/nio/NioSocketChannel.java | 65 +- .../elasticsearch/nio/SelectionKeyUtils.java | 55 +- .../nio/ServerChannelContext.java | 62 + .../nio/SocketChannelContext.java | 129 ++ .../elasticsearch/nio/SocketEventHandler.java | 62 +- .../org/elasticsearch/nio/SocketSelector.java | 14 +- .../org/elasticsearch/nio/WriteOperation.java | 74 +- .../nio/AcceptorEventHandlerTests.java | 12 +- .../nio/BytesChannelContextTests.java | 337 ++++++ .../nio/BytesReadContextTests.java | 142 --- .../nio/BytesWriteContextTests.java | 212 ---- .../nio/ChannelFactoryTests.java | 3 +- .../nio/NioServerSocketChannelTests.java | 76 +- .../nio/NioSocketChannelTests.java | 83 +- .../nio/SocketEventHandlerTests.java | 120 +- .../nio/SocketSelectorTests.java | 48 +- .../nio/WriteOperationTests.java | 61 +- modules/build.gradle | 2 +- .../CustomReflectionObjectHandler.java | 6 + modules/lang-painless/build.gradle | 1 + .../lang-painless/spi/build.gradle | 35 +- .../painless/spi/PainlessExtension.java | 0 .../elasticsearch/painless/spi/Whitelist.java | 0 .../painless/spi/WhitelistLoader.java | 0 .../painless/AnalyzerCaster.java | 1042 ++++++++--------- .../elasticsearch/painless/Definition.java | 155 ++- .../elasticsearch/painless/MethodWriter.java | 91 +- .../painless/node/AExpression.java | 5 +- .../painless/node/EAssignment.java | 43 +- .../elasticsearch/painless/node/EBinary.java | 37 +- .../painless/node/ECapturingFunctionRef.java | 5 +- .../elasticsearch/painless/node/ECast.java | 3 +- .../elasticsearch/painless/node/EComp.java | 24 +- .../painless/node/EConditional.java | 3 +- .../elasticsearch/painless/node/EElvis.java | 4 +- .../painless/node/EFunctionRef.java | 5 +- .../elasticsearch/painless/node/ELambda.java | 5 +- .../elasticsearch/painless/node/EUnary.java | 6 +- .../painless/node/SSubEachArray.java | 11 +- .../painless/node/SSubEachIterable.java | 3 +- .../painless/AnalyzerCasterTests.java | 85 +- 
.../painless/node/NodeToStringTests.java | 4 +- .../rest-api-spec/test/painless/15_update.yml | 2 +- .../rest-api-spec/test/painless/30_search.yml | 36 + .../index/mapper/ScaledFloatFieldMapper.java | 8 +- .../index/mapper/TokenCountFieldMapper.java | 4 +- .../mapper/TokenCountFieldMapperTests.java | 4 +- .../join/mapper/ParentIdFieldMapper.java | 4 +- .../join/mapper/ParentJoinFieldMapper.java | 8 +- .../mapper/ParentJoinFieldMapperTests.java | 34 +- .../join/query/HasChildQueryBuilderTests.java | 2 +- .../query/HasParentQueryBuilderTests.java | 2 +- .../LegacyHasChildQueryBuilderTests.java | 4 +- .../LegacyHasParentQueryBuilderTests.java | 6 +- .../LegacyParentIdQueryBuilderTests.java | 4 +- .../join/query/ParentIdQueryBuilderTests.java | 2 +- .../percolator/CandidateQueryTests.java | 4 +- .../PercolateQueryBuilderTests.java | 4 +- .../PercolatorFieldMapperTests.java | 10 +- .../index/rankeval/RankEvalResponse.java | 2 - .../index/rankeval/RankEvalResponseTests.java | 32 +- .../rest-api-spec/test/rank_eval/10_basic.yml | 60 +- .../rest-api-spec/test/rank_eval/20_dcg.yml | 40 +- .../test/rank_eval/30_failures.yml | 14 +- modules/reindex/build.gradle | 7 - .../index/reindex/CancelTests.java | 22 +- .../index/reindex/ReindexFailureTests.java | 8 + .../ICUCollationKeywordFieldMapper.java | 8 +- .../ICUCollationKeywordFieldMapperTests.java | 4 +- plugins/discovery-azure-classic/build.gradle | 2 +- plugins/discovery-ec2/build.gradle | 5 +- plugins/discovery-gce/build.gradle | 2 +- plugins/examples/meta-plugin/build.gradle | 38 +- plugins/ingest-attachment/build.gradle | 4 +- plugins/jvm-example/build.gradle | 2 +- .../index/mapper/size/SizeFieldMapper.java | 2 +- .../index/mapper/size/SizeMappingTests.java | 2 +- plugins/repository-hdfs/build.gradle | 8 +- .../transport/nio/NioTransport.java | 49 +- .../nio/TcpNioServerSocketChannel.java | 11 +- .../transport/nio/TcpNioSocketChannel.java | 9 +- qa/build.gradle | 0 qa/reindex-from-old/build.gradle | 5 +- .../test/rank-eval/30_template.yml | 10 +- .../rest-api-spec/api/indices.create.json | 4 - .../api/indices.put_mapping.json | 4 - .../test/indices.stats/13_fields.yml | 49 +- .../test/search.aggregation/230_composite.yml | 83 +- server/build.gradle | 29 +- server/licenses/joda-time-2.9.5.jar.sha1 | 1 - server/licenses/joda-time-2.9.9.jar.sha1 | 1 + .../main/java/org/elasticsearch/Version.java | 12 + .../alias/get/TransportGetAliasesAction.java | 3 +- .../clear/ClearIndicesCacheResponse.java | 5 +- .../TransportClearIndicesCacheAction.java | 4 +- .../indices/close/CloseIndexResponse.java | 24 +- .../close/TransportCloseIndexAction.java | 1 - .../CreateIndexClusterStateUpdateRequest.java | 10 +- .../indices/create/CreateIndexRequest.java | 21 +- .../create/CreateIndexRequestBuilder.java | 6 - .../create/TransportCreateIndexAction.java | 2 +- .../admin/indices/flush/FlushResponse.java | 4 +- .../indices/flush/TransportFlushAction.java | 5 +- .../forcemerge/ForceMergeResponse.java | 4 +- .../forcemerge/TransportForceMergeAction.java | 4 +- .../PutMappingClusterStateUpdateRequest.java | 11 - .../mapping/put/PutMappingRequest.java | 33 +- .../mapping/put/PutMappingRequestBuilder.java | 6 - .../mapping/put/PutMappingResponse.java | 25 +- .../put/TransportPutMappingAction.java | 1 - .../admin/indices/open/OpenIndexResponse.java | 2 +- .../indices/recovery/RecoveryResponse.java | 5 +- .../recovery/TransportRecoveryAction.java | 4 +- .../indices/refresh/RefreshResponse.java | 26 +- .../refresh/TransportRefreshAction.java | 5 +- 
.../rollover/TransportRolloverAction.java | 2 +- .../segments/IndicesSegmentResponse.java | 5 +- .../TransportIndicesSegmentsAction.java | 4 +- .../shards/IndicesShardStoresResponse.java | 5 +- .../indices/shrink/TransportResizeAction.java | 2 +- .../indices/stats/IndicesStatsResponse.java | 5 +- .../stats/TransportIndicesStatsAction.java | 4 +- .../get/TransportUpgradeStatusAction.java | 4 +- .../upgrade/get/UpgradeStatusResponse.java | 6 +- .../upgrade/post/TransportUpgradeAction.java | 4 +- .../indices/upgrade/post/UpgradeResponse.java | 5 +- .../query/TransportValidateQueryAction.java | 3 +- .../validate/query/ValidateQueryResponse.java | 5 +- .../action/get/MultiGetRequest.java | 98 +- .../action/get/MultiGetResponse.java | 113 +- .../search/AbstractSearchAsyncAction.java | 8 +- .../DefaultShardOperationFailedException.java | 25 +- .../support/broadcast/BroadcastResponse.java | 72 +- .../node/TransportBroadcastByNodeAction.java | 5 +- .../TransportBroadcastReplicationAction.java | 8 +- .../action/update/UpdateRequest.java | 25 + .../client/transport/TransportClient.java | 13 +- .../cluster/metadata/MetaData.java | 7 +- .../metadata/MetaDataCreateIndexService.java | 2 +- .../metadata/MetaDataIndexAliasesService.java | 2 +- .../MetaDataIndexTemplateService.java | 2 +- .../metadata/MetaDataIndexUpgradeService.java | 2 +- .../metadata/MetaDataMappingService.java | 8 +- .../cluster/node/DiscoveryNodes.java | 92 +- .../common/settings/Setting.java | 4 +- .../common/util/CollectionUtils.java | 38 + .../common/xcontent/XContentBuilder.java | 36 +- .../org/elasticsearch/index/IndexService.java | 2 +- .../org/elasticsearch/index/VersionType.java | 5 + .../analysis/SynonymTokenFilterFactory.java | 4 +- .../index/engine/CombinedDeletionPolicy.java | 68 +- .../elasticsearch/index/engine/Engine.java | 8 + .../index/engine/InternalEngine.java | 83 +- .../fielddata/IndexFieldDataService.java | 2 +- .../index/fielddata/ShardFieldData.java | 6 +- .../elasticsearch/index/get/GetResult.java | 9 +- .../index/mapper/CompletionFieldMapper.java | 8 +- .../index/mapper/DateFieldMapper.java | 8 +- .../index/mapper/DocumentMapper.java | 4 +- .../index/mapper/DocumentParser.java | 4 +- .../index/mapper/FieldMapper.java | 9 +- .../index/mapper/FieldNamesFieldMapper.java | 11 - .../index/mapper/FieldTypeLookup.java | 83 +- .../index/mapper/GeoPointFieldMapper.java | 4 +- .../index/mapper/GeoShapeFieldMapper.java | 17 +- .../index/mapper/IdFieldMapper.java | 2 +- .../index/mapper/IndexFieldMapper.java | 2 +- .../index/mapper/IpFieldMapper.java | 4 +- .../index/mapper/KeywordFieldMapper.java | 8 +- .../index/mapper/MappedFieldType.java | 23 +- .../elasticsearch/index/mapper/Mapper.java | 2 +- .../index/mapper/MapperService.java | 35 +- .../elasticsearch/index/mapper/Mapping.java | 8 +- .../index/mapper/MetadataFieldMapper.java | 6 +- .../index/mapper/NumberFieldMapper.java | 4 +- .../index/mapper/ObjectMapper.java | 11 +- .../index/mapper/ParentFieldMapper.java | 4 +- .../index/mapper/ParsedDocument.java | 2 +- .../index/mapper/RangeFieldMapper.java | 27 +- .../index/mapper/RootObjectMapper.java | 8 +- .../index/mapper/RoutingFieldMapper.java | 2 +- .../index/mapper/SeqNoFieldMapper.java | 2 +- .../index/mapper/SourceFieldMapper.java | 2 +- .../index/mapper/TextFieldMapper.java | 29 +- .../index/mapper/TypeFieldMapper.java | 2 +- .../index/mapper/UidFieldMapper.java | 2 +- .../index/mapper/VersionFieldMapper.java | 2 +- .../search/SimpleQueryStringQueryParser.java | 13 +- .../index/seqno/CountedBitSet.java | 36 +- 
.../seqno/GlobalCheckpointSyncAction.java | 5 +- .../index/seqno/LocalCheckpointTracker.java | 13 +- .../elasticsearch/index/shard/IndexShard.java | 22 +- .../index/shard/StoreRecovery.java | 2 +- .../org/elasticsearch/index/store/Store.java | 10 +- .../index/translog/MultiSnapshot.java | 5 +- .../elasticsearch/indices/IndicesService.java | 2 +- .../elasticsearch/monitor/jvm/JvmInfo.java | 12 +- .../org/elasticsearch/monitor/jvm/JvmPid.java | 49 + .../elasticsearch/plugins/ActionPlugin.java | 10 + .../admin/indices/RestCreateIndexAction.java | 1 - .../admin/indices/RestGetAliasesAction.java | 1 - .../admin/indices/RestPutMappingAction.java | 1 - .../CompositeAggregationBuilder.java | 6 +- .../CompositeAggregationFactory.java | 7 +- .../bucket/composite/CompositeAggregator.java | 17 +- .../composite/CompositeValuesComparator.java | 2 +- .../composite/CompositeValuesSource.java | 18 +- .../CompositeValuesSourceBuilder.java | 36 +- .../CompositeValuesSourceConfig.java | 22 +- .../DateHistogramValuesSourceBuilder.java | 13 +- .../HistogramValuesSourceBuilder.java | 4 +- .../bucket/composite/InternalComposite.java | 88 +- .../composite/TermsValuesSourceBuilder.java | 2 +- .../metrics/avg/AvgAggregator.java | 24 +- .../aggregations/metrics/avg/InternalAvg.java | 15 +- .../scripted/ScriptedMetricAggregator.java | 2 + .../metrics/stats/InternalStats.java | 13 +- .../metrics/stats/StatsAggregator.java | 23 +- .../extended/ExtendedStatsAggregator.java | 41 +- .../stats/extended/InternalExtendedStats.java | 15 +- .../aggregations/metrics/sum/InternalSum.java | 15 +- .../metrics/sum/SumAggregator.java | 24 +- .../BucketScriptPipelineAggregator.java | 3 +- .../aggregations/support/ValuesSource.java | 5 +- .../support/values/ScriptBytesValues.java | 2 + .../subphase/ScriptFieldsFetchSubPhase.java | 2 + .../search/sort/ScriptSortBuilder.java | 5 +- .../org/elasticsearch/monitor/jvm/JvmPid.java | 17 +- .../close/CloseIndexResponseTests.java | 60 + .../admin/indices/create/CreateIndexIT.java | 2 +- .../create/CreateIndexRequestTests.java | 4 +- .../mapping/put/PutMappingRequestTests.java | 84 ++ .../mapping/put/PutMappingResponseTests.java | 85 ++ .../indices/stats/IndicesStatsTests.java | 5 +- .../action/get/MultiGetRequestTests.java | 61 + .../action/get/MultiGetResponseTests.java | 83 ++ .../TransportBroadcastByNodeActionTests.java | 6 +- .../BroadcastReplicationTests.java | 4 +- .../action/update/UpdateRequestTests.java | 12 + .../allocation/FilteringAllocationIT.java | 58 + .../metadata/IndexCreationTaskTests.java | 4 +- .../cluster/node/DiscoveryNodesTests.java | 7 +- .../common/settings/ScopedSettingsTests.java | 44 + .../common/util/CollectionUtilsTests.java | 16 + .../common/xcontent/BaseXContentTestCase.java | 23 +- .../org/elasticsearch/get/GetActionIT.java | 4 +- .../engine/CombinedDeletionPolicyTests.java | 81 +- .../index/engine/InternalEngineTests.java | 88 +- .../index/mapper/AllFieldMapperTests.java | 2 +- .../index/mapper/BooleanFieldMapperTests.java | 2 +- .../index/mapper/CopyToMapperTests.java | 20 +- .../index/mapper/DateFieldMapperTests.java | 6 +- .../mapper/DocumentMapperMergeTests.java | 34 +- .../index/mapper/DynamicMappingTests.java | 12 +- .../index/mapper/ExternalMapper.java | 2 +- .../index/mapper/FakeStringFieldMapper.java | 4 +- .../mapper/FieldNamesFieldMapperTests.java | 6 +- .../index/mapper/FieldTypeLookupTests.java | 56 +- .../mapper/GeoShapeFieldMapperTests.java | 6 +- .../index/mapper/IdFieldMapperTests.java | 4 +- .../mapper/JavaMultiFieldMergeTests.java | 16 +- 
.../index/mapper/KeywordFieldMapperTests.java | 6 +- .../index/mapper/MapperServiceTests.java | 36 +- .../index/mapper/NestedObjectMapperTests.java | 16 +- .../index/mapper/ObjectMapperTests.java | 4 +- .../index/mapper/ParentFieldMapperTests.java | 12 +- ...angeFieldQueryStringQueryBuilderTests.java | 2 +- .../index/mapper/RootObjectMapperTests.java | 24 +- .../index/mapper/SourceFieldMapperTests.java | 10 +- .../mapper/StoredNumericValuesTests.java | 2 +- .../index/mapper/TextFieldMapperTests.java | 4 +- .../index/mapper/TypeFieldMapperTests.java | 6 +- .../index/mapper/UidFieldMapperTests.java | 4 +- .../index/mapper/UpdateMappingTests.java | 30 +- .../index/query/MatchQueryBuilderTests.java | 2 +- .../index/query/NestedQueryBuilderTests.java | 2 +- .../query/QueryStringQueryBuilderTests.java | 4 +- .../index/query/RangeQueryBuilderTests.java | 14 +- .../index/query/RangeQueryRewriteTests.java | 4 +- .../query/SimpleQueryStringBuilderTests.java | 18 + .../index/query/TermsQueryBuilderTests.java | 7 +- .../query/TermsSetQueryBuilderTests.java | 2 +- .../ESIndexLevelReplicationTestCase.java | 2 +- .../RecoveryDuringReplicationTests.java | 46 +- .../index/search/MultiMatchQueryTests.java | 2 +- .../index/seqno/CountedBitSetTests.java | 5 - .../GlobalCheckpointSyncActionTests.java | 4 +- .../seqno/LocalCheckpointTrackerTests.java | 3 +- .../mapping/UpdateMappingIntegrationIT.java | 1 - .../indices/stats/IndexStatsIT.java | 7 +- .../action/cat/RestRecoveryActionTests.java | 4 +- .../composite/CompositeAggregatorTests.java | 87 ++ .../composite/InternalCompositeTests.java | 56 +- .../metrics/ExtendedStatsAggregatorTests.java | 65 + .../metrics/InternalExtendedStatsTests.java | 42 + .../metrics/InternalStatsTests.java | 52 +- .../metrics/InternalSumTests.java | 45 +- .../metrics/StatsAggregatorTests.java | 63 + .../metrics/SumAggregatorTests.java | 54 +- .../metrics/avg/AvgAggregatorTests.java | 61 +- .../metrics/avg/InternalAvgTests.java | 41 + .../search/child/ParentFieldLoadingIT.java | 1 - .../search/query/SearchQueryIT.java | 14 + settings.gradle | 155 +-- .../index/engine/TranslogHandler.java | 2 +- .../index/mapper/FieldTypeTestCase.java | 46 +- .../index/shard/IndexShardTestCase.java | 4 +- .../test/AbstractQueryTestCase.java | 16 +- .../elasticsearch/test/ESIntegTestCase.java | 4 +- .../test/InternalAggregationTestCase.java | 1 + .../hamcrest/ElasticsearchAssertions.java | 6 +- .../test/rest/yaml/ClientYamlTestClient.java | 2 +- .../AbstractSimpleTransportTestCase.java | 21 +- .../transport/nio/MockNioTransport.java | 29 +- 372 files changed, 6707 insertions(+), 3442 deletions(-) create mode 100644 buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy create mode 100644 buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginPropertiesExtension.groovy create mode 100644 buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginPropertiesTask.groovy create mode 100644 buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.es-meta-plugin.properties create mode 100644 docs/java-rest/high-level/apis/close_index.asciidoc create mode 100644 docs/java-rest/high-level/apis/open_index.asciidoc create mode 100644 docs/java-rest/high-level/apis/putmapping.asciidoc create mode 100644 libs/build.gradle create mode 100644 libs/elasticsearch-core/src/main/eclipse-build.gradle create mode 100644 libs/elasticsearch-core/src/test/eclipse-build.gradle create mode 100644 
libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesChannelContext.java
 delete mode 100644 libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesReadContext.java
 delete mode 100644 libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteContext.java
 create mode 100644 libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteOperation.java
 create mode 100644 libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ChannelContext.java
 create mode 100644 libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ServerChannelContext.java
 create mode 100644 libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java
 create mode 100644 libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesChannelContextTests.java
 delete mode 100644 libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesReadContextTests.java
 delete mode 100644 libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesWriteContextTests.java
 rename libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteContext.java => modules/lang-painless/spi/build.gradle (66%)
 rename modules/lang-painless/{ => spi}/src/main/java/org/elasticsearch/painless/spi/PainlessExtension.java (100%)
 rename modules/lang-painless/{ => spi}/src/main/java/org/elasticsearch/painless/spi/Whitelist.java (100%)
 rename modules/lang-painless/{ => spi}/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java (100%)
 create mode 100644 qa/build.gradle
 delete mode 100644 server/licenses/joda-time-2.9.5.jar.sha1
 create mode 100644 server/licenses/joda-time-2.9.9.jar.sha1
 create mode 100644 server/src/main/java/org/elasticsearch/monitor/jvm/JvmPid.java
 rename libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ReadContext.java => server/src/main/java9/org/elasticsearch/monitor/jvm/JvmPid.java (71%)
 create mode 100644 server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java
 create mode 100644 server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingResponseTests.java
 create mode 100644 server/src/test/java/org/elasticsearch/action/get/MultiGetResponseTests.java

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 92ffa75de6c23..985c70a39a091 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -92,6 +92,14 @@ Contributing to the Elasticsearch codebase
 **Repository:** [https://github.com/elastic/elasticsearch](https://github.com/elastic/elasticsearch)
 
+JDK 9 is required to build Elasticsearch. You must have a JDK 9 installation
+with the environment variable `JAVA_HOME` referencing the path to Java home for
+your JDK 9 installation. By default, tests use the same runtime as `JAVA_HOME`.
+However, since Elasticsearch supports JDK 8, the build supports compiling with
+JDK 9 and testing on a JDK 8 runtime; to do this, set `RUNTIME_JAVA_HOME`
+pointing to the Java home of a JDK 8 installation. Note that this mechanism can
+be used to test against other JDKs as well; it is not limited to JDK 8.
+
 Elasticsearch uses the Gradle wrapper for its build. You can execute Gradle
 using the wrapper via the `gradlew` script in the root of the repository.
 
@@ -118,8 +126,8 @@
 Alternatively, `idea.no.launcher=true` can be set in the
 [`idea.properties`](https://www.jetbrains.com/help/idea/file-idea-properties.html)
 file which can be accessed under Help > Edit Custom Properties (this will
 require a restart of IDEA).
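The JDK selection described in the paragraph above (and implemented in `BuildPlugin.groovy` later in this patch) reduces to a simple rule: `JAVA_HOME` picks the compiler JDK, and `RUNTIME_JAVA_HOME`, when set, overrides the JDK used to run tests. The following is an illustrative Java transliteration only; the real build code is Groovy, and the method names and exception type here are assumptions.

```java
public class JdkResolutionSketch {

    // JAVA_HOME must point at JDK 9: it is the JDK used to compile.
    static String findCompilerJavaHome() {
        String javaHome = System.getenv("JAVA_HOME");
        if (javaHome == null) {
            // the real build raises a GradleException with the same message
            throw new IllegalStateException("JAVA_HOME must be set to build Elasticsearch");
        }
        return javaHome;
    }

    // RUNTIME_JAVA_HOME (e.g. a JDK 8 install) overrides the test runtime;
    // when unset, tests run on the compiler JDK. On the compile side, the build
    // then passes javac's `--release` flag so the JDK 9 compiler targets the
    // platform API of the older runtime.
    static String findRuntimeJavaHome(String compilerJavaHome) {
        String runtimeJavaHome = System.getenv("RUNTIME_JAVA_HOME");
        return runtimeJavaHome != null ? runtimeJavaHome : compilerJavaHome;
    }
}
```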
For IDEA 2017.3 and above, in addition to the JVM option, you will need to go to -`Run->Edit Configurations->...->Defaults->JUnit` and change the value for the `Shorten command line` setting from -`user-local default: none` to `classpath file`. You may also need to [remove `ant-javafx.jar` from your +`Run->Edit Configurations->...->Defaults->JUnit` and verify that the `Shorten command line` setting is set to +`user-local default: none`. You may also need to [remove `ant-javafx.jar` from your classpath](https://github.com/elastic/elasticsearch/issues/14348) if that is reported as a source of jar hell. diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 63012a2d99d13..0df80116099e3 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -43,6 +43,7 @@ import org.gradle.api.publish.maven.MavenPublication import org.gradle.api.publish.maven.plugins.MavenPublishPlugin import org.gradle.api.publish.maven.tasks.GenerateMavenPom import org.gradle.api.tasks.bundling.Jar +import org.gradle.api.tasks.compile.GroovyCompile import org.gradle.api.tasks.compile.JavaCompile import org.gradle.api.tasks.javadoc.Javadoc import org.gradle.internal.jvm.Jvm @@ -56,7 +57,8 @@ import java.time.ZonedDateTime */ class BuildPlugin implements Plugin { - static final JavaVersion minimumJava = JavaVersion.VERSION_1_8 + static final JavaVersion minimumRuntimeVersion = JavaVersion.VERSION_1_8 + static final JavaVersion minimumCompilerVersion = JavaVersion.VERSION_1_9 @Override void apply(Project project) { @@ -93,20 +95,26 @@ class BuildPlugin implements Plugin { /** Performs checks on the build environment and prints information about the build environment. 
*/ static void globalBuildInfo(Project project) { if (project.rootProject.ext.has('buildChecksDone') == false) { - String javaHome = findJavaHome() + String compilerJavaHome = findCompilerJavaHome() + String runtimeJavaHome = findRuntimeJavaHome(compilerJavaHome) File gradleJavaHome = Jvm.current().javaHome String javaVendor = System.getProperty('java.vendor') String javaVersion = System.getProperty('java.version') String gradleJavaVersionDetails = "${javaVendor} ${javaVersion}" + " [${System.getProperty('java.vm.name')} ${System.getProperty('java.vm.version')}]" - String javaVersionDetails = gradleJavaVersionDetails - JavaVersion javaVersionEnum = JavaVersion.current() - if (new File(javaHome).canonicalPath != gradleJavaHome.canonicalPath) { - javaVersionDetails = findJavaVersionDetails(project, javaHome) - javaVersionEnum = JavaVersion.toVersion(findJavaSpecificationVersion(project, javaHome)) - javaVendor = findJavaVendor(project, javaHome) - javaVersion = findJavaVersion(project, javaHome) + String compilerJavaVersionDetails = gradleJavaVersionDetails + JavaVersion compilerJavaVersionEnum = JavaVersion.current() + if (new File(compilerJavaHome).canonicalPath != gradleJavaHome.canonicalPath) { + compilerJavaVersionDetails = findJavaVersionDetails(project, compilerJavaHome) + compilerJavaVersionEnum = JavaVersion.toVersion(findJavaSpecificationVersion(project, compilerJavaHome)) + } + + String runtimeJavaVersionDetails = gradleJavaVersionDetails + JavaVersion runtimeJavaVersionEnum = JavaVersion.current() + if (new File(runtimeJavaHome).canonicalPath != gradleJavaHome.canonicalPath) { + runtimeJavaVersionDetails = findJavaVersionDetails(project, runtimeJavaHome) + runtimeJavaVersionEnum = JavaVersion.toVersion(findJavaSpecificationVersion(project, runtimeJavaHome)) } // Build debugging info @@ -115,11 +123,13 @@ class BuildPlugin implements Plugin { println '=======================================' println " Gradle Version : ${project.gradle.gradleVersion}" println " OS Info : ${System.getProperty('os.name')} ${System.getProperty('os.version')} (${System.getProperty('os.arch')})" - if (gradleJavaVersionDetails != javaVersionDetails) { + if (gradleJavaVersionDetails != compilerJavaVersionDetails || gradleJavaVersionDetails != runtimeJavaVersionDetails) { println " JDK Version (gradle) : ${gradleJavaVersionDetails}" println " JAVA_HOME (gradle) : ${gradleJavaHome}" - println " JDK Version (compile) : ${javaVersionDetails}" - println " JAVA_HOME (compile) : ${javaHome}" + println " JDK Version (compile) : ${compilerJavaVersionDetails}" + println " JAVA_HOME (compile) : ${compilerJavaHome}" + println " JDK Version (runtime) : ${runtimeJavaVersionDetails}" + println " JAVA_HOME (runtime) : ${runtimeJavaHome}" } else { println " JDK Version : ${gradleJavaVersionDetails}" println " JAVA_HOME : ${gradleJavaHome}" @@ -135,54 +145,49 @@ class BuildPlugin implements Plugin { } // enforce Java version - if (javaVersionEnum < minimumJava) { - throw new GradleException("Java ${minimumJava} or above is required to build Elasticsearch") + if (compilerJavaVersionEnum < minimumCompilerVersion) { + throw new GradleException("Java ${minimumCompilerVersion} or above is required to build Elasticsearch") } - // this block of code detecting buggy JDK 8 compiler versions can be removed when minimum Java version is incremented - assert minimumJava == JavaVersion.VERSION_1_8 : "Remove JDK compiler bug detection only applicable to JDK 8" - if (javaVersionEnum == JavaVersion.VERSION_1_8) { - if 
(Objects.equals("Oracle Corporation", javaVendor)) { - def matcher = javaVersion =~ /1\.8\.0(?:_(\d+))?/ - if (matcher.matches()) { - int update; - if (matcher.group(1) == null) { - update = 0 - } else { - update = matcher.group(1).toInteger() - } - if (update < 40) { - throw new GradleException("JDK ${javaVendor} ${javaVersion} has compiler bug JDK-8052388, update your JDK to at least 8u40") - } - } - } + if (runtimeJavaVersionEnum < minimumRuntimeVersion) { + throw new GradleException("Java ${minimumRuntimeVersion} or above is required to run Elasticsearch") } - project.rootProject.ext.javaHome = javaHome - project.rootProject.ext.javaVersion = javaVersionEnum + project.rootProject.ext.compilerJavaHome = compilerJavaHome + project.rootProject.ext.runtimeJavaHome = runtimeJavaHome + project.rootProject.ext.compilerJavaVersion = compilerJavaVersionEnum + project.rootProject.ext.runtimeJavaVersion = runtimeJavaVersionEnum project.rootProject.ext.buildChecksDone = true } - project.targetCompatibility = minimumJava - project.sourceCompatibility = minimumJava + + project.targetCompatibility = minimumRuntimeVersion + project.sourceCompatibility = minimumRuntimeVersion + // set java home for each project, so they dont have to find it in the root project - project.ext.javaHome = project.rootProject.ext.javaHome - project.ext.javaVersion = project.rootProject.ext.javaVersion + project.ext.compilerJavaHome = project.rootProject.ext.compilerJavaHome + project.ext.runtimeJavaHome = project.rootProject.ext.runtimeJavaHome + project.ext.compilerJavaVersion = project.rootProject.ext.compilerJavaVersion + project.ext.runtimeJavaVersion = project.rootProject.ext.runtimeJavaVersion } - /** Finds and enforces JAVA_HOME is set */ - private static String findJavaHome() { - String javaHome = System.getenv('JAVA_HOME') + private static String findCompilerJavaHome() { + final String javaHome = System.getenv('JAVA_HOME') if (javaHome == null) { if (System.getProperty("idea.active") != null || System.getProperty("eclipse.launcher") != null) { - // intellij doesn't set JAVA_HOME, so we use the jdk gradle was run with - javaHome = Jvm.current().javaHome + // IntelliJ does not set JAVA_HOME, so we use the JDK that Gradle was run with + return Jvm.current().javaHome } else { - throw new GradleException('JAVA_HOME must be set to build Elasticsearch') + throw new GradleException("JAVA_HOME must be set to build Elasticsearch") } } return javaHome } + private static String findRuntimeJavaHome(final String compilerJavaHome) { + assert compilerJavaHome != null + return System.getenv('RUNTIME_JAVA_HOME') ?: compilerJavaHome + } + /** Finds printable java version of the given JAVA_HOME */ private static String findJavaVersionDetails(Project project, String javaHome) { String versionInfoScript = 'print(' + @@ -412,19 +417,19 @@ class BuildPlugin implements Plugin { /** Adds compiler settings to the project */ static void configureCompile(Project project) { - if (project.javaVersion < JavaVersion.VERSION_1_10) { + if (project.compilerJavaVersion < JavaVersion.VERSION_1_10) { project.ext.compactProfile = 'compact3' } else { project.ext.compactProfile = 'full' } project.afterEvaluate { project.tasks.withType(JavaCompile) { - File gradleJavaHome = Jvm.current().javaHome + final JavaVersion targetCompatibilityVersion = JavaVersion.toVersion(it.targetCompatibility) // we fork because compiling lots of different classes in a shared jvm can eventually trigger GC overhead limitations options.fork = true - options.forkOptions.executable 
= new File(project.javaHome, 'bin/javac') + options.forkOptions.javaHome = new File(project.compilerJavaHome) options.forkOptions.memoryMaximumSize = "1g" - if (project.targetCompatibility >= JavaVersion.VERSION_1_8) { + if (targetCompatibilityVersion == JavaVersion.VERSION_1_8) { // compile with compact 3 profile by default // NOTE: this is just a compile time check: does not replace testing with a compact3 JRE if (project.compactProfile != 'full') { @@ -448,21 +453,25 @@ class BuildPlugin implements Plugin { options.encoding = 'UTF-8' options.incremental = true - if (project.javaVersion == JavaVersion.VERSION_1_9) { - // hack until gradle supports java 9's new "--release" arg - assert minimumJava == JavaVersion.VERSION_1_8 - options.compilerArgs << '--release' << '8' - } + // TODO: use native Gradle support for --release when available (cf. https://github.com/gradle/gradle/issues/2510) + options.compilerArgs << '--release' << targetCompatibilityVersion.majorVersion + } + // also apply release flag to groovy, which is used in build-tools + project.tasks.withType(GroovyCompile) { + final JavaVersion targetCompatibilityVersion = JavaVersion.toVersion(it.targetCompatibility) + options.fork = true + options.forkOptions.javaHome = new File(project.compilerJavaHome) + options.compilerArgs << '--release' << targetCompatibilityVersion.majorVersion } } } static void configureJavadoc(Project project) { project.tasks.withType(Javadoc) { - executable = new File(project.javaHome, 'bin/javadoc') + executable = new File(project.compilerJavaHome, 'bin/javadoc') } configureJavadocJar(project) - if (project.javaVersion == JavaVersion.VERSION_1_10) { + if (project.compilerJavaVersion == JavaVersion.VERSION_1_10) { project.tasks.withType(Javadoc) { it.enabled = false } project.tasks.getByName('javadocJar').each { it.enabled = false } } @@ -508,7 +517,7 @@ class BuildPlugin implements Plugin { 'X-Compile-Lucene-Version': VersionProperties.lucene, 'X-Compile-Elasticsearch-Snapshot': isSnapshot, 'Build-Date': ZonedDateTime.now(ZoneOffset.UTC), - 'Build-Java-Version': project.javaVersion) + 'Build-Java-Version': project.compilerJavaVersion) if (jarTask.manifest.attributes.containsKey('Change') == false) { logger.warn('Building without git revision id.') jarTask.manifest.attributes('Change': 'Unknown') @@ -545,7 +554,7 @@ class BuildPlugin implements Plugin { /** Returns a closure of common configuration shared by unit and integration tests. 
*/ static Closure commonTestConfig(Project project) { return { - jvm "${project.javaHome}/bin/java" + jvm "${project.runtimeJavaHome}/bin/java" parallelism System.getProperty('tests.jvms', 'auto') ifNoTests 'fail' onNonEmptyWorkDirectory 'wipe' @@ -650,7 +659,10 @@ class BuildPlugin implements Plugin { Task precommit = PrecommitTasks.create(project, true) project.check.dependsOn(precommit) project.test.mustRunAfter(precommit) - project.dependencyLicenses.dependencies = project.configurations.runtime - project.configurations.provided + // only require dependency licenses for non-elasticsearch deps + project.dependencyLicenses.dependencies = project.configurations.runtime.fileCollection { + it.group.startsWith('org.elasticsearch') == false + } - project.configurations.provided } private static configureDependenciesInfo(Project project) { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy new file mode 100644 index 0000000000000..3df9b604c1309 --- /dev/null +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy @@ -0,0 +1,99 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.gradle.plugin + +import org.elasticsearch.gradle.BuildPlugin +import org.elasticsearch.gradle.test.RestTestPlugin +import org.elasticsearch.gradle.test.RunTask +import org.elasticsearch.gradle.test.StandaloneRestTestPlugin +import org.gradle.api.Plugin +import org.gradle.api.Project +import org.gradle.api.file.FileCopyDetails +import org.gradle.api.file.RelativePath +import org.gradle.api.tasks.bundling.Zip + +class MetaPluginBuildPlugin implements Plugin { + + @Override + void apply(Project project) { + project.plugins.apply(StandaloneRestTestPlugin) + project.plugins.apply(RestTestPlugin) + + createBundleTask(project) + + project.integTestCluster { + dependsOn(project.bundlePlugin) + plugin(project.path) + } + BuildPlugin.configurePomGeneration(project) + project.afterEvaluate { + PluginBuildPlugin.addZipPomGeneration(project) + } + + RunTask run = project.tasks.create('run', RunTask) + run.dependsOn(project.bundlePlugin) + run.clusterConfig.plugin(project.path) + } + + private static void createBundleTask(Project project) { + + MetaPluginPropertiesTask buildProperties = project.tasks.create('pluginProperties', MetaPluginPropertiesTask.class) + + // create the actual bundle task, which zips up all the files for the plugin + Zip bundle = project.tasks.create(name: 'bundlePlugin', type: Zip, dependsOn: [buildProperties]) { + into('elasticsearch') { + from(buildProperties.descriptorOutput.parentFile) { + // plugin properties file + include(buildProperties.descriptorOutput.name) + } + } + // due to how the renames work for each bundled plugin, we must exclude empty dirs or every subdir + // within bundled plugin zips will show up at the root as an empty dir + includeEmptyDirs = false + + } + project.assemble.dependsOn(bundle) + + // also make the zip available as a configuration (used when depending on this project) + project.configurations.create('zip') + project.artifacts.add('zip', bundle) + + // a super hacky way to inject code to run at the end of each of the bundled plugin's configuration + // to add itself back to this meta plugin zip + project.afterEvaluate { + buildProperties.extension.plugins.each { String bundledPluginProjectName -> + Project bundledPluginProject = project.project(bundledPluginProjectName) + bundledPluginProject.afterEvaluate { + bundle.configure { + dependsOn bundledPluginProject.bundlePlugin + from(project.zipTree(bundledPluginProject.bundlePlugin.outputs.files.singleFile)) { + eachFile { FileCopyDetails details -> + // paths in the individual plugins begin with elasticsearch, and we want to add in the + // bundled plugin name between that and each filename + details.relativePath = new RelativePath(true, 'elasticsearch', bundledPluginProjectName, + details.relativePath.toString().replace('elasticsearch/', '')) + } + } + } + } + } + } + } +} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginPropertiesExtension.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginPropertiesExtension.groovy new file mode 100644 index 0000000000000..e5d84002e533f --- /dev/null +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginPropertiesExtension.groovy @@ -0,0 +1,46 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.gradle.plugin + +import org.gradle.api.Project +import org.gradle.api.tasks.Input + +/** + * A container for meta plugin properties that will be written to the meta plugin descriptor, for easy + * manipulation in the gradle DSL. + */ +class MetaPluginPropertiesExtension { + @Input + String name + + @Input + String description + + /** + * The plugins this meta plugin wraps. + * Note this is not written to the plugin descriptor, but used to setup the final zip file task. + */ + @Input + List plugins + + MetaPluginPropertiesExtension(Project project) { + name = project.name + } +} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginPropertiesTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginPropertiesTask.groovy new file mode 100644 index 0000000000000..e868cc2cc3128 --- /dev/null +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginPropertiesTask.groovy @@ -0,0 +1,68 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.gradle.plugin + +import org.gradle.api.InvalidUserDataException +import org.gradle.api.Task +import org.gradle.api.tasks.Copy +import org.gradle.api.tasks.OutputFile + +class MetaPluginPropertiesTask extends Copy { + + MetaPluginPropertiesExtension extension + + @OutputFile + File descriptorOutput = new File(project.buildDir, 'generated-resources/meta-plugin-descriptor.properties') + + MetaPluginPropertiesTask() { + File templateFile = new File(project.buildDir, "templates/${descriptorOutput.name}") + Task copyPluginPropertiesTemplate = project.tasks.create('copyPluginPropertiesTemplate') { + doLast { + InputStream resourceTemplate = PluginPropertiesTask.getResourceAsStream("/${descriptorOutput.name}") + templateFile.parentFile.mkdirs() + templateFile.setText(resourceTemplate.getText('UTF-8'), 'UTF-8') + } + } + + dependsOn(copyPluginPropertiesTemplate) + extension = project.extensions.create('es_meta_plugin', MetaPluginPropertiesExtension, project) + project.afterEvaluate { + // check require properties are set + if (extension.name == null) { + throw new InvalidUserDataException('name is a required setting for es_meta_plugin') + } + if (extension.description == null) { + throw new InvalidUserDataException('description is a required setting for es_meta_plugin') + } + // configure property substitution + from(templateFile.parentFile).include(descriptorOutput.name) + into(descriptorOutput.parentFile) + Map properties = generateSubstitutions() + expand(properties) + inputs.properties(properties) + } + } + + Map generateSubstitutions() { + return ['name': extension.name, + 'description': extension.description + ] + } +} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy index c3be764269358..950acad9a5eb4 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy @@ -18,6 +18,7 @@ */ package org.elasticsearch.gradle.plugin +import nebula.plugin.info.scm.ScmInfoPlugin import org.elasticsearch.gradle.BuildPlugin import org.elasticsearch.gradle.NoticeTask import org.elasticsearch.gradle.test.RestIntegTestTask @@ -169,7 +170,7 @@ public class PluginBuildPlugin extends BuildPlugin { Files.copy(jarFile.resolveSibling(sourcesFileName), jarFile.resolveSibling(clientSourcesFileName), StandardCopyOption.REPLACE_EXISTING) - if (project.javaVersion < JavaVersion.VERSION_1_10) { + if (project.compilerJavaVersion < JavaVersion.VERSION_1_10) { String javadocFileName = jarFile.fileName.toString().replace('.jar', '-javadoc.jar') String clientJavadocFileName = clientFileName.replace('.jar', '-javadoc.jar') Files.copy(jarFile.resolveSibling(javadocFileName), jarFile.resolveSibling(clientJavadocFileName), @@ -220,7 +221,8 @@ public class PluginBuildPlugin extends BuildPlugin { } /** Adds a task to generate a pom file for the zip distribution. 
*/ - protected void addZipPomGeneration(Project project) { + public static void addZipPomGeneration(Project project) { + project.plugins.apply(ScmInfoPlugin.class) project.plugins.apply(MavenPublishPlugin.class) project.publishing { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/JarHellTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/JarHellTask.groovy index f8eb0a63c96d7..656d5e0d35a9e 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/JarHellTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/JarHellTask.groovy @@ -42,7 +42,7 @@ public class JarHellTask extends LoggedExec { inputs.files(classpath) dependsOn(classpath) description = "Runs CheckJarHell on ${classpath}" - executable = new File(project.javaHome, 'bin/java') + executable = new File(project.runtimeJavaHome, 'bin/java') doFirst({ /* JarHell doesn't like getting directories that don't exist but gradle isn't especially careful about that. So we have to do it diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LoggerUsageTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LoggerUsageTask.groovy index 01ec6f7f5d3e2..87b73795604ab 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LoggerUsageTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LoggerUsageTask.groovy @@ -44,7 +44,7 @@ public class LoggerUsageTask extends LoggedExec { project.afterEvaluate { dependsOn(classpath) description = "Runs LoggerUsageCheck on ${classDirectories}" - executable = new File(project.javaHome, 'bin/java') + executable = new File(project.runtimeJavaHome, 'bin/java') if (classDirectories == null) { // Default to main and test class files List files = [] diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/NamingConventionsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/NamingConventionsTask.groovy index 2711a0e38f23b..0feed8ccc4e04 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/NamingConventionsTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/NamingConventionsTask.groovy @@ -80,7 +80,7 @@ public class NamingConventionsTask extends LoggedExec { FileCollection classpath = project.sourceSets.test.runtimeClasspath inputs.files(classpath) description = "Tests that test classes aren't misnamed or misplaced" - executable = new File(project.javaHome, 'bin/java') + executable = new File(project.runtimeJavaHome, 'bin/java') if (false == checkForTestsInMain) { /* This task is created by default for all subprojects with this * setting and there is no point in running it if the files don't diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index 2b3b5abd82c9c..593a08c873594 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -23,6 +23,8 @@ import org.apache.tools.ant.taskdefs.condition.Os import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionProperties +import org.elasticsearch.gradle.plugin.MetaPluginBuildPlugin +import org.elasticsearch.gradle.plugin.MetaPluginPropertiesExtension import org.elasticsearch.gradle.plugin.PluginBuildPlugin import 
org.elasticsearch.gradle.plugin.PluginPropertiesExtension import org.gradle.api.AntBuilder @@ -138,8 +140,8 @@ class ClusterFormationTasks { /** Adds a dependency on a different version of the given plugin, which will be retrieved using gradle's dependency resolution */ static void configureBwcPluginDependency(String name, Project project, Project pluginProject, Configuration configuration, String elasticsearchVersion) { verifyProjectHasBuildPlugin(name, elasticsearchVersion, project, pluginProject) - PluginPropertiesExtension extension = pluginProject.extensions.findByName('esplugin'); - project.dependencies.add(configuration.name, "org.elasticsearch.plugin:${extension.name}:${elasticsearchVersion}@zip") + final String pluginName = findPluginName(pluginProject) + project.dependencies.add(configuration.name, "org.elasticsearch.plugin:${pluginName}:${elasticsearchVersion}@zip") } /** @@ -449,7 +451,7 @@ class ClusterFormationTasks { configuration = project.configurations.create(configurationName) } - final String depName = pluginProject.extensions.findByName('esplugin').name + final String depName = findPluginName(pluginProject) Dependency dep = bwcPlugins.dependencies.find { it.name == depName @@ -655,7 +657,7 @@ class ClusterFormationTasks { String pid = node.pidFile.getText('UTF-8') ByteArrayOutputStream output = new ByteArrayOutputStream() project.exec { - commandLine = ["${project.javaHome}/bin/jstack", pid] + commandLine = ["${project.runtimeJavaHome}/bin/jstack", pid] standardOutput = output } output.toString('UTF-8').eachLine { line -> logger.error("| ${line}") } @@ -699,7 +701,7 @@ class ClusterFormationTasks { } private static File getJpsExecutableByName(Project project, String jpsExecutableName) { - return Paths.get(project.javaHome.toString(), "bin/" + jpsExecutableName).toFile() + return Paths.get(project.runtimeJavaHome.toString(), "bin/" + jpsExecutableName).toFile() } /** Adds a task to kill an elasticsearch node with the given pidfile */ @@ -753,9 +755,19 @@ class ClusterFormationTasks { } static void verifyProjectHasBuildPlugin(String name, String version, Project project, Project pluginProject) { - if (pluginProject.plugins.hasPlugin(PluginBuildPlugin) == false) { + if (pluginProject.plugins.hasPlugin(PluginBuildPlugin) == false && pluginProject.plugins.hasPlugin(MetaPluginBuildPlugin) == false) { throw new GradleException("Task [${name}] cannot add plugin [${pluginProject.path}] with version [${version}] to project's " + - "[${project.path}] dependencies: the plugin is not an esplugin") + "[${project.path}] dependencies: the plugin is not an esplugin or es_meta_plugin") + } + } + + /** Find the plugin name in the given project, whether a regular plugin or meta plugin. 
*/ + static String findPluginName(Project pluginProject) { + PluginPropertiesExtension extension = pluginProject.extensions.findByName('esplugin') + if (extension != null) { + return extension.name + } else { + return pluginProject.extensions.findByName('es_meta_plugin').name } } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy index 77da1c8ed7824..40a8ec230ac4e 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy @@ -162,7 +162,7 @@ class NodeInfo { args.add("${esScript}") } - env = ['JAVA_HOME': project.javaHome] + env = ['JAVA_HOME': project.runtimeJavaHome] args.addAll("-E", "node.portsfile=true") String collectedSystemProperties = config.systemProperties.collect { key, value -> "-D${key}=${value}" }.join(" ") String esJavaOpts = config.jvmArgs.isEmpty() ? collectedSystemProperties : collectedSystemProperties + " " + config.jvmArgs diff --git a/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.es-meta-plugin.properties b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.es-meta-plugin.properties new file mode 100644 index 0000000000000..50240e95416c7 --- /dev/null +++ b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.es-meta-plugin.properties @@ -0,0 +1,20 @@ +# +# Licensed to Elasticsearch under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +implementation-class=org.elasticsearch.gradle.plugin.MetaPluginBuildPlugin diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java index 3e5e00a224d9c..5906faf1f8947 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java @@ -21,10 +21,14 @@ import org.apache.http.Header; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; +import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; @@ -51,9 +55,9 @@ public final class IndicesClient { * See * Delete Index API on elastic.co */ - public DeleteIndexResponse deleteIndex(DeleteIndexRequest deleteIndexRequest, Header... headers) throws IOException { + public DeleteIndexResponse delete(DeleteIndexRequest deleteIndexRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(deleteIndexRequest, Request::deleteIndex, DeleteIndexResponse::fromXContent, - Collections.emptySet(), headers); + Collections.emptySet(), headers); } /** @@ -62,10 +66,9 @@ public DeleteIndexResponse deleteIndex(DeleteIndexRequest deleteIndexRequest, He * See * Delete Index API on elastic.co */ - public void deleteIndexAsync(DeleteIndexRequest deleteIndexRequest, ActionListener listener, - Header... headers) { + public void deleteAsync(DeleteIndexRequest deleteIndexRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(deleteIndexRequest, Request::deleteIndex, DeleteIndexResponse::fromXContent, - listener, Collections.emptySet(), headers); + listener, Collections.emptySet(), headers); } /** @@ -74,7 +77,7 @@ public void deleteIndexAsync(DeleteIndexRequest deleteIndexRequest, ActionListen * See * Create Index API on elastic.co */ - public CreateIndexResponse createIndex(CreateIndexRequest createIndexRequest, Header... headers) throws IOException { + public CreateIndexResponse create(CreateIndexRequest createIndexRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(createIndexRequest, Request::createIndex, CreateIndexResponse::fromXContent, Collections.emptySet(), headers); } @@ -85,9 +88,31 @@ public CreateIndexResponse createIndex(CreateIndexRequest createIndexRequest, He * See * Create Index API on elastic.co */ - public void createIndexAsync(CreateIndexRequest createIndexRequest, ActionListener listener, - Header... headers) { + public void createAsync(CreateIndexRequest createIndexRequest, ActionListener listener, Header... 
headers) { restHighLevelClient.performRequestAsyncAndParseEntity(createIndexRequest, Request::createIndex, CreateIndexResponse::fromXContent, + listener, Collections.emptySet(), headers); + } + + /** + * Updates the mappings on an index using the Put Mapping API + *
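
A minimal usage sketch for the new blocking putMapping method; the index name ("twitter"), type name ("tweet"), and field ("message") are placeholder values mirroring the documentation tests, and an already-configured RestHighLevelClient is assumed:

```java
import java.io.IOException;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.xcontent.XContentType;

class PutMappingSketch {
    // Adds a "message" text field to the "tweet" type of the "twitter" index.
    static boolean addMessageField(RestHighLevelClient client) throws IOException {
        PutMappingRequest request = new PutMappingRequest("twitter");
        request.type("tweet");
        request.source(
                "{\"tweet\":{\"properties\":{\"message\":{\"type\":\"text\"}}}}",
                XContentType.JSON);
        PutMappingResponse response = client.indices().putMapping(request);
        return response.isAcknowledged();
    }
}
```
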

+ * See + * Put Mapping API on elastic.co + */ + public PutMappingResponse putMapping(PutMappingRequest putMappingRequest, Header... headers) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(putMappingRequest, Request::putMapping, PutMappingResponse::fromXContent, + Collections.emptySet(), headers); + } + + /** + * Asynchronously updates the mappings on an index using the Put Mapping API + *

+ * See + * Put Mapping API on elastic.co + */ + public void putMappingAsync(PutMappingRequest putMappingRequest, ActionListener listener, + Header... headers) { + restHighLevelClient.performRequestAsyncAndParseEntity(putMappingRequest, Request::putMapping, PutMappingResponse::fromXContent, listener, Collections.emptySet(), headers); } @@ -97,7 +122,7 @@ public void createIndexAsync(CreateIndexRequest createIndexRequest, ActionListen * See * Open Index API on elastic.co */ - public OpenIndexResponse openIndex(OpenIndexRequest openIndexRequest, Header... headers) throws IOException { + public OpenIndexResponse open(OpenIndexRequest openIndexRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(openIndexRequest, Request::openIndex, OpenIndexResponse::fromXContent, Collections.emptySet(), headers); } @@ -108,16 +133,36 @@ public OpenIndexResponse openIndex(OpenIndexRequest openIndexRequest, Header... * See * Open Index API on elastic.co */ - public void openIndexAsync(OpenIndexRequest openIndexRequest, ActionListener listener, Header... headers) { + public void openAsync(OpenIndexRequest openIndexRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(openIndexRequest, Request::openIndex, OpenIndexResponse::fromXContent, listener, Collections.emptySet(), headers); } /** - * Refresh one or more index using the Refresh API + * Closes an index using the Close Index API + *
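
The async variants added in this patch all share the same callback shape. A sketch of putMappingAsync, assuming the same placeholder request as the blocking example above:

```java
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
import org.elasticsearch.client.RestHighLevelClient;

class PutMappingAsyncSketch {
    // Fires the mapping update in the background; the listener runs on completion.
    static void updateMappingInBackground(RestHighLevelClient client, PutMappingRequest request) {
        client.indices().putMappingAsync(request, new ActionListener<PutMappingResponse>() {
            @Override
            public void onResponse(PutMappingResponse putMappingResponse) {
                // success path: check putMappingResponse.isAcknowledged()
            }

            @Override
            public void onFailure(Exception e) {
                // transport-level or server-side failure
            }
        });
    }
}
```
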

+ * See + * Close Index API on elastic.co + */ + public CloseIndexResponse close(CloseIndexRequest closeIndexRequest, Header... headers) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(closeIndexRequest, Request::closeIndex, CloseIndexResponse::fromXContent, + Collections.emptySet(), headers); + } + + /** + * Asynchronously closes an index using the Close Index API + *
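
A usage sketch for the new blocking close method; "posts" is a placeholder index name:

```java
import java.io.IOException;
import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
import org.elasticsearch.client.RestHighLevelClient;

class CloseIndexSketch {
    // Closes the "posts" index and reports whether the cluster acknowledged it.
    static boolean closeIndex(RestHighLevelClient client) throws IOException {
        CloseIndexRequest request = new CloseIndexRequest("posts");
        CloseIndexResponse response = client.indices().close(request);
        return response.isAcknowledged();
    }
}
```
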

+ * See + * Close Index API on elastic.co + */ + public void closeAsync(CloseIndexRequest closeIndexRequest, ActionListener listener, Header... headers) { + restHighLevelClient.performRequestAsyncAndParseEntity(closeIndexRequest, Request::closeIndex, CloseIndexResponse::fromXContent, + listener, Collections.emptySet(), headers); + } + /** + * Refresh one or more indices using the Refresh API *
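
A sketch of the refresh call and the shard accounting exercised by the tests below; "index1" and "index2" are placeholders, and with default settings (5 primaries, 1 replica each) every index contributes 10 shards to the totals:

```java
import java.io.IOException;
import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
import org.elasticsearch.client.RestHighLevelClient;

class RefreshSketch {
    // Refreshes two indices and inspects the broadcast shard counters.
    static void refreshIndices(RestHighLevelClient client) throws IOException {
        RefreshRequest request = new RefreshRequest("index1", "index2");
        RefreshResponse response = client.indices().refresh(request);
        int total = response.getTotalShards();           // primaries + replicas targeted
        int successful = response.getSuccessfulShards(); // shards that refreshed
        int failed = response.getFailedShards();         // expected to be 0 on success
        assert successful + failed <= total;
    }
}
```
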

- * See - * Refresh API on elastic.co + * See Refresh API on elastic.co */ public final RefreshResponse refresh(RefreshRequest refreshRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(refreshRequest, Request::refresh, RefreshResponse::fromXContent, @@ -125,10 +170,9 @@ public final RefreshResponse refresh(RefreshRequest refreshRequest, Header... he } /** - * Asynchronously refresh one or more index using the Refresh API + * Asynchronously refresh one or more indices using the Refresh API *

- * See - * Refresh API on elastic.co + * See Refresh API on elastic.co */ public final void refreshAsync(RefreshRequest refreshRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(refreshRequest, Request::refresh, RefreshResponse::fromXContent, diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java index 9525bb52515f8..f703f14c8f69e 100755 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java @@ -29,13 +29,16 @@ import org.apache.http.entity.ContentType; import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.MultiGetRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.search.MultiSearchRequest; @@ -153,6 +156,18 @@ static Request openIndex(OpenIndexRequest openIndexRequest) { return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), null); } + static Request closeIndex(CloseIndexRequest closeIndexRequest) { + String endpoint = endpoint(closeIndexRequest.indices(), Strings.EMPTY_ARRAY, "_close"); + + Params parameters = Params.builder(); + + parameters.withTimeout(closeIndexRequest.timeout()); + parameters.withMasterTimeout(closeIndexRequest.masterNodeTimeout()); + parameters.withIndicesOptions(closeIndexRequest.indicesOptions()); + + return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), null); + } + static Request refresh(RefreshRequest refreshRequest) { String endpoint = endpoint(refreshRequest.indices(), Strings.EMPTY_ARRAY, "_refresh"); return new Request(HttpPost.METHOD_NAME, endpoint, Collections.emptyMap(), null); @@ -165,12 +180,27 @@ static Request createIndex(CreateIndexRequest createIndexRequest) throws IOExcep parameters.withTimeout(createIndexRequest.timeout()); parameters.withMasterTimeout(createIndexRequest.masterNodeTimeout()); parameters.withWaitForActiveShards(createIndexRequest.waitForActiveShards()); - parameters.withUpdateAllTypes(createIndexRequest.updateAllTypes()); HttpEntity entity = createEntity(createIndexRequest, REQUEST_BODY_CONTENT_TYPE); return new Request(HttpPut.METHOD_NAME, endpoint, parameters.getParams(), entity); } + static Request putMapping(PutMappingRequest putMappingRequest) throws IOException { + // The concreteIndex is an internal concept, not applicable to requests made over the REST API. 
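
The check below rejects any request that carries it; past that, the path is assembled from the indices and the type through the new endpoint(String[], String, String) overload. A sketch of the resulting paths, which would have to live in package org.elasticsearch.client (as RequestTests does) because endpoint() is package-private:

```java
package org.elasticsearch.client; // endpoint() is package-private

public class EndpointSketch {
    public static void main(String[] args) {
        // "/index1,index2/_mapping/tweet"
        System.out.println(Request.endpoint(new String[]{"index1", "index2"}, "_mapping", "tweet"));
        // "/_mapping/tweet" -- an empty index list is simply omitted from the path
        System.out.println(Request.endpoint(new String[0], "_mapping", "tweet"));
    }
}
```
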
+ if (putMappingRequest.getConcreteIndex() != null) { + throw new IllegalArgumentException("concreteIndex cannot be set on PutMapping requests made over the REST API"); + } + + String endpoint = endpoint(putMappingRequest.indices(), "_mapping", putMappingRequest.type()); + + Params parameters = Params.builder(); + parameters.withTimeout(putMappingRequest.timeout()); + parameters.withMasterTimeout(putMappingRequest.masterNodeTimeout()); + + HttpEntity entity = createEntity(putMappingRequest, REQUEST_BODY_CONTENT_TYPE); + return new Request(HttpPut.METHOD_NAME, endpoint, parameters.getParams(), entity); + } + static Request info() { return new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null); } @@ -318,6 +348,15 @@ static Request get(GetRequest getRequest) { return new Request(HttpGet.METHOD_NAME, endpoint, parameters.getParams(), null); } + static Request multiGet(MultiGetRequest multiGetRequest) throws IOException { + Params parameters = Params.builder(); + parameters.withPreference(multiGetRequest.preference()); + parameters.withRealtime(multiGetRequest.realtime()); + parameters.withRefresh(multiGetRequest.refresh()); + HttpEntity entity = createEntity(multiGetRequest, REQUEST_BODY_CONTENT_TYPE); + return new Request(HttpGet.METHOD_NAME, "/_mget", parameters.getParams(), entity); + } + static Request index(IndexRequest indexRequest) { String method = Strings.hasLength(indexRequest.id()) ? HttpPut.METHOD_NAME : HttpPost.METHOD_NAME; @@ -438,6 +477,10 @@ static String endpoint(String[] indices, String[] types, String endpoint) { return endpoint(String.join(",", indices), String.join(",", types), endpoint); } + static String endpoint(String[] indices, String endpoint, String type) { + return endpoint(String.join(",", indices), endpoint, type); + } + /** * Utility method to build request's endpoint. */ @@ -568,13 +611,6 @@ Params withTimeout(TimeValue timeout) { return putParam("timeout", timeout); } - Params withUpdateAllTypes(boolean updateAllTypes) { - if (updateAllTypes) { - return putParam("update_all_types", Boolean.TRUE.toString()); - } - return this; - } - Params withVersion(long version) { if (version != Versions.MATCH_ANY) { return putParam("version", Long.toString(version)); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index 5f330256f6846..9fb53a54d8c0a 100755 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -32,6 +32,8 @@ import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.get.MultiGetRequest; +import org.elasticsearch.action.get.MultiGetResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.main.MainRequest; @@ -287,6 +289,25 @@ public final void getAsync(GetRequest getRequest, ActionListener li performRequestAsyncAndParseEntity(getRequest, Request::get, GetResponse::fromXContent, listener, singleton(404), headers); } + /** + * Retrieves multiple documents by id using the Multi Get API + * + * See Multi Get API on elastic.co + */ + public final MultiGetResponse multiGet(MultiGetRequest multiGetRequest, Header... 
headers) throws IOException { + return performRequestAndParseEntity(multiGetRequest, Request::multiGet, MultiGetResponse::fromXContent, singleton(404), headers); + } + + /** + * Asynchronously retrieves multiple documents by id using the Multi Get API + * + * See Multi Get API on elastic.co + */ + public void multiGetAsync(MultiGetRequest multiGetRequest, ActionListener listener, Header... headers) { + performRequestAsyncAndParseEntity(multiGetRequest, Request::multiGet, MultiGetResponse::fromXContent, listener, + singleton(404), headers); + } + /** * Checks for the existence of a document. Returns true if it exists, false otherwise * diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java index e36c445082ed6..14d29ddd9eb67 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java @@ -33,6 +33,8 @@ import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.get.MultiGetRequest; +import org.elasticsearch.action.get.MultiGetResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.update.UpdateRequest; @@ -238,6 +240,64 @@ public void testGet() throws IOException { } } + public void testMultiGet() throws IOException { + { + MultiGetRequest multiGetRequest = new MultiGetRequest(); + multiGetRequest.add("index", "type", "id1"); + multiGetRequest.add("index", "type", "id2"); + MultiGetResponse response = execute(multiGetRequest, highLevelClient()::multiGet, highLevelClient()::multiGetAsync); + assertEquals(2, response.getResponses().length); + + assertTrue(response.getResponses()[0].isFailed()); + assertNull(response.getResponses()[0].getResponse()); + assertEquals("id1", response.getResponses()[0].getFailure().getId()); + assertEquals("type", response.getResponses()[0].getFailure().getType()); + assertEquals("index", response.getResponses()[0].getFailure().getIndex()); + assertEquals("Elasticsearch exception [type=index_not_found_exception, reason=no such index]", + response.getResponses()[0].getFailure().getFailure().getMessage()); + + assertTrue(response.getResponses()[1].isFailed()); + assertNull(response.getResponses()[1].getResponse()); + assertEquals("id2", response.getResponses()[1].getId()); + assertEquals("type", response.getResponses()[1].getType()); + assertEquals("index", response.getResponses()[1].getIndex()); + assertEquals("Elasticsearch exception [type=index_not_found_exception, reason=no such index]", + response.getResponses()[1].getFailure().getFailure().getMessage()); + } + + String document = "{\"field\":\"value1\"}"; + StringEntity stringEntity = new StringEntity(document, ContentType.APPLICATION_JSON); + Response r = client().performRequest("PUT", "/index/type/id1", Collections.singletonMap("refresh", "true"), stringEntity); + assertEquals(201, r.getStatusLine().getStatusCode()); + + document = "{\"field\":\"value2\"}"; + stringEntity = new StringEntity(document, ContentType.APPLICATION_JSON); + r = client().performRequest("PUT", "/index/type/id2", Collections.singletonMap("refresh", "true"), stringEntity); + assertEquals(201, r.getStatusLine().getStatusCode()); + + { + MultiGetRequest multiGetRequest = new MultiGetRequest(); + 
multiGetRequest.add("index", "type", "id1"); + multiGetRequest.add("index", "type", "id2"); + MultiGetResponse response = execute(multiGetRequest, highLevelClient()::multiGet, highLevelClient()::multiGetAsync); + assertEquals(2, response.getResponses().length); + + assertFalse(response.getResponses()[0].isFailed()); + assertNull(response.getResponses()[0].getFailure()); + assertEquals("id1", response.getResponses()[0].getId()); + assertEquals("type", response.getResponses()[0].getType()); + assertEquals("index", response.getResponses()[0].getIndex()); + assertEquals(Collections.singletonMap("field", "value1"), response.getResponses()[0].getResponse().getSource()); + + assertFalse(response.getResponses()[1].isFailed()); + assertNull(response.getResponses()[1].getFailure()); + assertEquals("id2", response.getResponses()[1].getId()); + assertEquals("type", response.getResponses()[1].getType()); + assertEquals("index", response.getResponses()[1].getIndex()); + assertEquals(Collections.singletonMap("field", "value2"), response.getResponses()[1].getResponse().getSource()); + } + } + public void testIndex() throws IOException { final XContentType xContentType = randomFrom(XContentType.values()); { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index 21e2e1b0896fd..bd55e0cb4c558 100755 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -21,15 +21,20 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; +import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; @@ -38,13 +43,16 @@ import org.elasticsearch.rest.RestStatus; import java.io.IOException; -import java.util.Locale; import java.util.Map; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; public class IndicesClientIT extends ESRestHighLevelClientTestCase { + static { + System.setProperty("tests.rest.cluster", "localhost:9200"); + } @SuppressWarnings("unchecked") public void testCreateIndex() throws IOException { @@ -56,7 +64,7 @@ public void testCreateIndex() throws IOException { CreateIndexRequest 
createIndexRequest = new CreateIndexRequest(indexName); CreateIndexResponse createIndexResponse = - execute(createIndexRequest, highLevelClient().indices()::createIndex, highLevelClient().indices()::createIndexAsync); + execute(createIndexRequest, highLevelClient().indices()::create, highLevelClient().indices()::createAsync); assertTrue(createIndexResponse.isAcknowledged()); assertTrue(indexExists(indexName)); @@ -84,7 +92,7 @@ public void testCreateIndex() throws IOException { createIndexRequest.mapping("type_name", mappingBuilder); CreateIndexResponse createIndexResponse = - execute(createIndexRequest, highLevelClient().indices()::createIndex, highLevelClient().indices()::createIndexAsync); + execute(createIndexRequest, highLevelClient().indices()::create, highLevelClient().indices()::createAsync); assertTrue(createIndexResponse.isAcknowledged()); Map indexMetaData = getIndexMetadata(indexName); @@ -109,6 +117,35 @@ public void testCreateIndex() throws IOException { } } + @SuppressWarnings("unchecked") + public void testPutMapping() throws IOException { + { + // Add mappings to index + String indexName = "mapping_index"; + createIndex(indexName); + + PutMappingRequest putMappingRequest = new PutMappingRequest(indexName); + putMappingRequest.type("type_name"); + XContentBuilder mappingBuilder = JsonXContent.contentBuilder(); + mappingBuilder.startObject().startObject("properties").startObject("field"); + mappingBuilder.field("type", "text"); + mappingBuilder.endObject().endObject().endObject(); + putMappingRequest.source(mappingBuilder); + + PutMappingResponse putMappingResponse = + execute(putMappingRequest, highLevelClient().indices()::putMapping, highLevelClient().indices()::putMappingAsync); + assertTrue(putMappingResponse.isAcknowledged()); + + Map indexMetaData = getIndexMetadata(indexName); + Map mappingsData = (Map) indexMetaData.get("mappings"); + Map typeData = (Map) mappingsData.get("type_name"); + Map properties = (Map) typeData.get("properties"); + Map field = (Map) properties.get("field"); + + assertEquals("text", field.get("type")); + } + } + public void testDeleteIndex() throws IOException { { // Delete index if exists @@ -117,7 +154,7 @@ public void testDeleteIndex() throws IOException { DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(indexName); DeleteIndexResponse deleteIndexResponse = - execute(deleteIndexRequest, highLevelClient().indices()::deleteIndex, highLevelClient().indices()::deleteIndexAsync); + execute(deleteIndexRequest, highLevelClient().indices()::delete, highLevelClient().indices()::deleteAsync); assertTrue(deleteIndexResponse.isAcknowledged()); assertFalse(indexExists(indexName)); @@ -130,91 +167,100 @@ public void testDeleteIndex() throws IOException { DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(nonExistentIndex); ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> execute(deleteIndexRequest, highLevelClient().indices()::deleteIndex, highLevelClient().indices()::deleteIndexAsync)); + () -> execute(deleteIndexRequest, highLevelClient().indices()::delete, highLevelClient().indices()::deleteAsync)); assertEquals(RestStatus.NOT_FOUND, exception.status()); } } public void testOpenExistingIndex() throws IOException { - String[] indices = randomIndices(1, 5); - for (String index : indices) { - createIndex(index); - closeIndex(index); - ResponseException exception = expectThrows(ResponseException.class, () -> client().performRequest("GET", index + "/_search")); - 
assertThat(exception.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.BAD_REQUEST.getStatus())); - assertThat(exception.getMessage().contains(index), equalTo(true)); - } - - OpenIndexRequest openIndexRequest = new OpenIndexRequest(indices); - OpenIndexResponse openIndexResponse = execute(openIndexRequest, highLevelClient().indices()::openIndex, - highLevelClient().indices()::openIndexAsync); + String index = "index"; + createIndex(index); + closeIndex(index); + ResponseException exception = expectThrows(ResponseException.class, () -> client().performRequest("GET", index + "/_search")); + assertThat(exception.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.BAD_REQUEST.getStatus())); + assertThat(exception.getMessage().contains(index), equalTo(true)); + + OpenIndexRequest openIndexRequest = new OpenIndexRequest(index); + OpenIndexResponse openIndexResponse = execute(openIndexRequest, highLevelClient().indices()::open, + highLevelClient().indices()::openAsync); assertTrue(openIndexResponse.isAcknowledged()); - for (String index : indices) { - Response response = client().performRequest("GET", index + "/_search"); - assertThat(response.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus())); - } + Response response = client().performRequest("GET", index + "/_search"); + assertThat(response.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus())); } public void testOpenNonExistentIndex() throws IOException { - String[] nonExistentIndices = randomIndices(1, 5); - for (String nonExistentIndex : nonExistentIndices) { - assertFalse(indexExists(nonExistentIndex)); - } + String nonExistentIndex = "non_existent_index"; + assertFalse(indexExists(nonExistentIndex)); - OpenIndexRequest openIndexRequest = new OpenIndexRequest(nonExistentIndices); + OpenIndexRequest openIndexRequest = new OpenIndexRequest(nonExistentIndex); ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> execute(openIndexRequest, highLevelClient().indices()::openIndex, highLevelClient().indices()::openIndexAsync)); + () -> execute(openIndexRequest, highLevelClient().indices()::open, highLevelClient().indices()::openAsync)); assertEquals(RestStatus.NOT_FOUND, exception.status()); - OpenIndexRequest lenientOpenIndexRequest = new OpenIndexRequest(nonExistentIndices); + OpenIndexRequest lenientOpenIndexRequest = new OpenIndexRequest(nonExistentIndex); lenientOpenIndexRequest.indicesOptions(IndicesOptions.lenientExpandOpen()); - OpenIndexResponse lenientOpenIndexResponse = execute(lenientOpenIndexRequest, highLevelClient().indices()::openIndex, - highLevelClient().indices()::openIndexAsync); + OpenIndexResponse lenientOpenIndexResponse = execute(lenientOpenIndexRequest, highLevelClient().indices()::open, + highLevelClient().indices()::openAsync); assertThat(lenientOpenIndexResponse.isAcknowledged(), equalTo(true)); - OpenIndexRequest strictOpenIndexRequest = new OpenIndexRequest(nonExistentIndices); + OpenIndexRequest strictOpenIndexRequest = new OpenIndexRequest(nonExistentIndex); strictOpenIndexRequest.indicesOptions(IndicesOptions.strictExpandOpen()); ElasticsearchException strictException = expectThrows(ElasticsearchException.class, - () -> execute(openIndexRequest, highLevelClient().indices()::openIndex, highLevelClient().indices()::openIndexAsync)); + () -> execute(strictOpenIndexRequest, highLevelClient().indices()::open, highLevelClient().indices()::openAsync)); assertEquals(RestStatus.NOT_FOUND, strictException.status()); } + public void
testCloseExistingIndex() throws IOException { + String index = "index"; + createIndex(index); + Response response = client().performRequest("GET", index + "/_search"); + assertThat(response.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus())); + + CloseIndexRequest closeIndexRequest = new CloseIndexRequest(index); + CloseIndexResponse closeIndexResponse = execute(closeIndexRequest, highLevelClient().indices()::close, + highLevelClient().indices()::closeAsync); + assertTrue(closeIndexResponse.isAcknowledged()); + + ResponseException exception = expectThrows(ResponseException.class, () -> client().performRequest("GET", index + "/_search")); + assertThat(exception.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.BAD_REQUEST.getStatus())); + assertThat(exception.getMessage().contains(index), equalTo(true)); + } + + public void testCloseNonExistentIndex() throws IOException { + String nonExistentIndex = "non_existent_index"; + assertFalse(indexExists(nonExistentIndex)); + + CloseIndexRequest closeIndexRequest = new CloseIndexRequest(nonExistentIndex); + ElasticsearchException exception = expectThrows(ElasticsearchException.class, + () -> execute(closeIndexRequest, highLevelClient().indices()::close, highLevelClient().indices()::closeAsync)); + assertEquals(RestStatus.NOT_FOUND, exception.status()); + } + public void testRefresh() throws IOException { + // 10 shards per index by default (5 primary shards + 5 replica shards) + final int DEFAULT_SHARDS_NUMBER_PER_INDEX = 10; { - String[] indices = randomIndices(1, 5); - for (String index : indices) { - createIndex(index); - } - RefreshRequest refreshRequest = new RefreshRequest(indices); + String index = "index"; + createIndex(index); + RefreshRequest refreshRequest = new RefreshRequest(index); RefreshResponse refreshResponse = execute(refreshRequest, highLevelClient().indices()::refresh, highLevelClient().indices()::refreshAsync); - // 10 shards per index by default - assertThat(refreshResponse.getTotalShards(), equalTo(indices.length * 10)); + assertThat(refreshResponse.getTotalShards(), equalTo(DEFAULT_SHARDS_NUMBER_PER_INDEX)); + assertThat(refreshResponse.getSuccessfulShards(), greaterThan(0)); + assertThat(refreshResponse.getFailedShards(), equalTo(0)); + assertThat(refreshResponse.getShardFailures(), equalTo(BroadcastResponse.EMPTY)); } { - String[] nonExistentIndices = randomIndices(1, 5); - for (String nonExistentIndex : nonExistentIndices) { - if (indexExists(nonExistentIndex)) { - return; - } - } - RefreshRequest refreshRequest = new RefreshRequest(nonExistentIndices); + String nonExistentIndex = "non_existent_index"; + assertFalse(indexExists(nonExistentIndex)); + RefreshRequest refreshRequest = new RefreshRequest(nonExistentIndex); ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(refreshRequest, highLevelClient().indices()::refresh, highLevelClient().indices()::refreshAsync)); assertEquals(RestStatus.NOT_FOUND, exception.status()); } } - private static String[] randomIndices(int minIndicesNum, int maxIndicesNum) { - int numIndices = randomIntBetween(minIndicesNum, maxIndicesNum); - String[] indices = new String[numIndices]; - for (int i = 0; i < numIndices; i++) { - indices[i] = "index-" + randomAlphaOfLengthBetween(2, 5).toLowerCase(Locale.ROOT); - } - return indices; - } - private static void createIndex(String index) throws IOException { Response response = client().performRequest("PUT", index); assertThat(response.getStatusLine().getStatusCode(),
equalTo(RestStatus.OK.getStatus())); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java index 019162bae37a7..0ddaf1de1ca52 100755 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java @@ -25,13 +25,16 @@ import org.apache.http.entity.StringEntity; import org.apache.http.util.EntityUtils; import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkShardRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.MultiGetRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.search.MultiSearchRequest; @@ -147,6 +150,59 @@ public void testGet() { getAndExistsTest(Request::get, "GET"); } + public void testMultiGet() throws IOException { + Map expectedParams = new HashMap<>(); + MultiGetRequest multiGetRequest = new MultiGetRequest(); + if (randomBoolean()) { + String preference = randomAlphaOfLength(4); + multiGetRequest.preference(preference); + expectedParams.put("preference", preference); + } + if (randomBoolean()) { + multiGetRequest.realtime(randomBoolean()); + if (multiGetRequest.realtime() == false) { + expectedParams.put("realtime", "false"); + } + } + if (randomBoolean()) { + multiGetRequest.refresh(randomBoolean()); + if (multiGetRequest.refresh()) { + expectedParams.put("refresh", "true"); + } + } + + int numberOfRequests = randomIntBetween(0, 32); + for (int i = 0; i < numberOfRequests; i++) { + MultiGetRequest.Item item = + new MultiGetRequest.Item(randomAlphaOfLength(4), randomAlphaOfLength(4), randomAlphaOfLength(4)); + if (randomBoolean()) { + item.routing(randomAlphaOfLength(4)); + } + if (randomBoolean()) { + item.parent(randomAlphaOfLength(4)); + } + if (randomBoolean()) { + item.storedFields(generateRandomStringArray(16, 8, false)); + } + if (randomBoolean()) { + item.version(randomNonNegativeLong()); + } + if (randomBoolean()) { + item.versionType(randomFrom(VersionType.values())); + } + if (randomBoolean()) { + randomizeFetchSourceContextParams(item::fetchSourceContext, new HashMap<>()); + } + multiGetRequest.add(item); + } + + Request request = Request.multiGet(multiGetRequest); + assertEquals("GET", request.getMethod()); + assertEquals("/_mget", request.getEndpoint()); + assertEquals(expectedParams, request.getParameters()); + assertToXContentBody(multiGetRequest, request.getEntity()); + } + public void testDelete() { String index = randomAlphaOfLengthBetween(3, 10); String type = randomAlphaOfLengthBetween(3, 10); @@ -255,14 +311,6 @@ public void testCreateIndex() throws IOException { setRandomMasterTimeout(createIndexRequest, expectedParams); setRandomWaitForActiveShards(createIndexRequest::waitForActiveShards, expectedParams); - if (randomBoolean()) { - boolean updateAllTypes = randomBoolean(); - 
createIndexRequest.updateAllTypes(updateAllTypes); - if (updateAllTypes) { - expectedParams.put("update_all_types", Boolean.TRUE.toString()); - } - } - Request request = Request.createIndex(createIndexRequest); assertEquals("/" + indexName, request.getEndpoint()); assertEquals(expectedParams, request.getParameters()); @@ -270,18 +318,44 @@ public void testCreateIndex() throws IOException { assertToXContentBody(createIndexRequest, request.getEntity()); } - public void testDeleteIndex() { - DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(); + public void testPutMapping() throws IOException { + PutMappingRequest putMappingRequest = new PutMappingRequest(); int numIndices = randomIntBetween(0, 5); String[] indices = new String[numIndices]; for (int i = 0; i < numIndices; i++) { indices[i] = "index-" + randomAlphaOfLengthBetween(2, 5); } - deleteIndexRequest.indices(indices); + putMappingRequest.indices(indices); + + String type = randomAlphaOfLengthBetween(3, 10); + putMappingRequest.type(type); Map expectedParams = new HashMap<>(); + setRandomTimeout(putMappingRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); + setRandomMasterTimeout(putMappingRequest, expectedParams); + + Request request = Request.putMapping(putMappingRequest); + StringJoiner endpoint = new StringJoiner("/", "/", ""); + String index = String.join(",", indices); + if (Strings.hasLength(index)) { + endpoint.add(index); + } + endpoint.add("_mapping"); + endpoint.add(type); + assertEquals(endpoint.toString(), request.getEndpoint()); + + assertEquals(expectedParams, request.getParameters()); + assertEquals("PUT", request.getMethod()); + assertToXContentBody(putMappingRequest, request.getEntity()); + } + + public void testDeleteIndex() { + String[] indices = randomIndicesNames(0, 5); + DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(indices); + + Map expectedParams = new HashMap<>(); setRandomTimeout(deleteIndexRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); setRandomMasterTimeout(deleteIndexRequest, expectedParams); @@ -295,12 +369,8 @@ public void testDeleteIndex() { } public void testOpenIndex() { - OpenIndexRequest openIndexRequest = new OpenIndexRequest(); - int numIndices = randomIntBetween(1, 5); - String[] indices = new String[numIndices]; - for (int i = 0; i < numIndices; i++) { - indices[i] = "index-" + randomAlphaOfLengthBetween(2, 5); - } + String[] indices = randomIndicesNames(1, 5); + OpenIndexRequest openIndexRequest = new OpenIndexRequest(indices); openIndexRequest.indices(indices); Map expectedParams = new HashMap<>(); @@ -317,6 +387,23 @@ public void testOpenIndex() { assertThat(request.getEntity(), nullValue()); } + public void testCloseIndex() { + String[] indices = randomIndicesNames(1, 5); + CloseIndexRequest closeIndexRequest = new CloseIndexRequest(indices); + + Map expectedParams = new HashMap<>(); + setRandomTimeout(closeIndexRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); + setRandomMasterTimeout(closeIndexRequest, expectedParams); + setRandomIndicesOptions(closeIndexRequest::indicesOptions, closeIndexRequest::indicesOptions, expectedParams); + + Request request = Request.closeIndex(closeIndexRequest); + StringJoiner endpoint = new StringJoiner("/", "/", "").add(String.join(",", indices)).add("_close"); + assertThat(endpoint.toString(), equalTo(request.getEndpoint())); + assertThat(expectedParams, equalTo(request.getParameters())); + assertThat(request.getMethod(), equalTo("POST")); + 
assertThat(request.getEntity(), nullValue()); + } + public void testIndex() throws IOException { String index = randomAlphaOfLengthBetween(3, 10); String type = randomAlphaOfLengthBetween(3, 10); @@ -694,13 +781,9 @@ public void testBulkWithDifferentContentTypes() throws IOException { } public void testSearch() throws Exception { - SearchRequest searchRequest = new SearchRequest(); - int numIndices = randomIntBetween(0, 5); - String[] indices = new String[numIndices]; - for (int i = 0; i < numIndices; i++) { - indices[i] = "index-" + randomAlphaOfLengthBetween(2, 5); - } - searchRequest.indices(indices); + String[] indices = randomIndicesNames(0, 5); + SearchRequest searchRequest = new SearchRequest(indices); + int numTypes = randomIntBetween(0, 5); String[] types = new String[numTypes]; for (int i = 0; i < numTypes; i++) { @@ -737,44 +820,47 @@ public void testSearch() throws Exception { setRandomIndicesOptions(searchRequest::indicesOptions, searchRequest::indicesOptions, expectedParams); - SearchSourceBuilder searchSourceBuilder = null; + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + //rarely skip setting the search source completely if (frequently()) { - searchSourceBuilder = new SearchSourceBuilder(); - if (randomBoolean()) { - searchSourceBuilder.size(randomIntBetween(0, Integer.MAX_VALUE)); - } - if (randomBoolean()) { - searchSourceBuilder.from(randomIntBetween(0, Integer.MAX_VALUE)); - } - if (randomBoolean()) { - searchSourceBuilder.minScore(randomFloat()); - } - if (randomBoolean()) { - searchSourceBuilder.explain(randomBoolean()); - } - if (randomBoolean()) { - searchSourceBuilder.profile(randomBoolean()); - } - if (randomBoolean()) { - searchSourceBuilder.highlighter(new HighlightBuilder().field(randomAlphaOfLengthBetween(3, 10))); - } - if (randomBoolean()) { - searchSourceBuilder.query(new TermQueryBuilder(randomAlphaOfLengthBetween(3, 10), randomAlphaOfLengthBetween(3, 10))); - } - if (randomBoolean()) { - searchSourceBuilder.aggregation(new TermsAggregationBuilder(randomAlphaOfLengthBetween(3, 10), ValueType.STRING) - .field(randomAlphaOfLengthBetween(3, 10))); - } - if (randomBoolean()) { - searchSourceBuilder.suggest(new SuggestBuilder().addSuggestion(randomAlphaOfLengthBetween(3, 10), - new CompletionSuggestionBuilder(randomAlphaOfLengthBetween(3, 10)))); - } - if (randomBoolean()) { - searchSourceBuilder.addRescorer(new QueryRescorerBuilder( - new TermQueryBuilder(randomAlphaOfLengthBetween(3, 10), randomAlphaOfLengthBetween(3, 10)))); - } - if (randomBoolean()) { - searchSourceBuilder.collapse(new CollapseBuilder(randomAlphaOfLengthBetween(3, 10))); + //frequently set the search source to have some content, otherwise leave it empty but still set it + if (frequently()) { + if (randomBoolean()) { + searchSourceBuilder.size(randomIntBetween(0, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + searchSourceBuilder.from(randomIntBetween(0, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + searchSourceBuilder.minScore(randomFloat()); + } + if (randomBoolean()) { + searchSourceBuilder.explain(randomBoolean()); + } + if (randomBoolean()) { + searchSourceBuilder.profile(randomBoolean()); + } + if (randomBoolean()) { + searchSourceBuilder.highlighter(new HighlightBuilder().field(randomAlphaOfLengthBetween(3, 10))); + } + if (randomBoolean()) { + searchSourceBuilder.query(new TermQueryBuilder(randomAlphaOfLengthBetween(3, 10), randomAlphaOfLengthBetween(3, 10))); + } + if (randomBoolean()) { + searchSourceBuilder.aggregation(new 
TermsAggregationBuilder(randomAlphaOfLengthBetween(3, 10), ValueType.STRING) + .field(randomAlphaOfLengthBetween(3, 10))); + } + if (randomBoolean()) { + searchSourceBuilder.suggest(new SuggestBuilder().addSuggestion(randomAlphaOfLengthBetween(3, 10), + new CompletionSuggestionBuilder(randomAlphaOfLengthBetween(3, 10)))); + } + if (randomBoolean()) { + searchSourceBuilder.addRescorer(new QueryRescorerBuilder( + new TermQueryBuilder(randomAlphaOfLengthBetween(3, 10), randomAlphaOfLengthBetween(3, 10)))); + } + if (randomBoolean()) { + searchSourceBuilder.collapse(new CollapseBuilder(randomAlphaOfLengthBetween(3, 10))); + } } searchRequest.source(searchSourceBuilder); } @@ -792,11 +878,7 @@ public void testSearch() throws Exception { endpoint.add("_search"); assertEquals(endpoint.toString(), request.getEndpoint()); assertEquals(expectedParams, request.getParameters()); - if (searchSourceBuilder == null) { - assertNull(request.getEntity()); - } else { - assertToXContentBody(searchSourceBuilder, request.getEntity()); - } + assertToXContentBody(searchSourceBuilder, request.getEntity()); } public void testMultiSearch() throws IOException { @@ -1076,4 +1158,13 @@ private static String randomFields(String[] fields) { } return excludesParam.toString(); } + + private static String[] randomIndicesNames(int minIndicesNum, int maxIndicesNum) { + int numIndices = randomIntBetween(minIndicesNum, maxIndicesNum); + String[] indices = new String[numIndices]; + for (int i = 0; i < numIndices; i++) { + indices[i] = "index-" + randomAlphaOfLengthBetween(2, 5).toLowerCase(Locale.ROOT); + } + return indices; + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java index 42d19fab82fe9..23029c7c6b007 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java @@ -22,10 +22,16 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; +import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; +import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; +import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.ESRestHighLevelClientTestCase; @@ -58,7 +64,7 @@ public void testDeleteIndex() throws IOException { RestHighLevelClient client = highLevelClient(); { - CreateIndexResponse createIndexResponse = client.indices().createIndex(new CreateIndexRequest("posts")); + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("posts")); 
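
Taken together, this patch series renames the IndicesClient methods (createIndex to create, deleteIndex to delete, openIndex to open) and adds close, so the full index lifecycle now reads uniformly off client.indices(). A compact sketch, with "posts" as a placeholder index name and the response assertions omitted:

```java
import java.io.IOException;
import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
import org.elasticsearch.client.RestHighLevelClient;

class IndexLifecycleSketch {
    // Create, close, reopen, then delete a single index.
    static void lifecycle(RestHighLevelClient client) throws IOException {
        client.indices().create(new CreateIndexRequest("posts"));
        client.indices().close(new CloseIndexRequest("posts"));
        client.indices().open(new OpenIndexRequest("posts"));
        client.indices().delete(new DeleteIndexRequest("posts"));
    }
}
```
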
assertTrue(createIndexResponse.isAcknowledged()); } @@ -80,7 +86,7 @@ public void testDeleteIndex() throws IOException { // end::delete-index-request-indicesOptions // tag::delete-index-execute - DeleteIndexResponse deleteIndexResponse = client.indices().deleteIndex(request); + DeleteIndexResponse deleteIndexResponse = client.indices().delete(request); // end::delete-index-execute // tag::delete-index-response @@ -93,7 +99,7 @@ public void testDeleteIndex() throws IOException { // tag::delete-index-notfound try { DeleteIndexRequest request = new DeleteIndexRequest("does_not_exist"); - client.indices().deleteIndex(request); + client.indices().delete(request); } catch (ElasticsearchException exception) { if (exception.status() == RestStatus.NOT_FOUND) { // <1> @@ -107,7 +113,7 @@ public void testDeleteIndexAsync() throws Exception { final RestHighLevelClient client = highLevelClient(); { - CreateIndexResponse createIndexResponse = client.indices().createIndex(new CreateIndexRequest("posts")); + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("posts")); assertTrue(createIndexResponse.isAcknowledged()); } @@ -115,7 +121,7 @@ public void testDeleteIndexAsync() throws Exception { DeleteIndexRequest request = new DeleteIndexRequest("posts"); // tag::delete-index-execute-async - client.indices().deleteIndexAsync(request, new ActionListener() { + client.indices().deleteAsync(request, new ActionListener() { @Override public void onResponse(DeleteIndexResponse deleteIndexResponse) { // <1> @@ -153,15 +159,15 @@ public void testCreateIndex() throws IOException { // tag::create-index-request-mappings request.mapping("tweet", // <1> - " {\n" + - " \"tweet\": {\n" + - " \"properties\": {\n" + - " \"message\": {\n" + - " \"type\": \"text\"\n" + - " }\n" + + "{\n" + + " \"tweet\": {\n" + + " \"properties\": {\n" + + " \"message\": {\n" + + " \"type\": \"text\"\n" + " }\n" + " }\n" + - " }", // <2> + " }\n" + + "}", // <2> XContentType.JSON); // end::create-index-request-mappings @@ -185,7 +191,7 @@ public void testCreateIndex() throws IOException { // end::create-index-request-waitForActiveShards // tag::create-index-execute - CreateIndexResponse createIndexResponse = client.indices().createIndex(request); + CreateIndexResponse createIndexResponse = client.indices().create(request); // end::create-index-execute // tag::create-index-response @@ -203,7 +209,7 @@ public void testCreateIndexAsync() throws Exception { { CreateIndexRequest request = new CreateIndexRequest("twitter"); // tag::create-index-execute-async - client.indices().createIndexAsync(request, new ActionListener() { + client.indices().createAsync(request, new ActionListener() { @Override public void onResponse(CreateIndexResponse createIndexResponse) { // <1> @@ -224,4 +230,218 @@ public void onFailure(Exception e) { } } + public void testPutMapping() throws IOException { + RestHighLevelClient client = highLevelClient(); + + { + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("twitter")); + assertTrue(createIndexResponse.isAcknowledged()); + } + + { + // tag::put-mapping-request + PutMappingRequest request = new PutMappingRequest("twitter"); // <1> + request.type("tweet"); // <2> + // end::put-mapping-request + + // tag::put-mapping-request-source + request.source( + "{\n" + + " \"tweet\": {\n" + + " \"properties\": {\n" + + " \"message\": {\n" + + " \"type\": \"text\"\n" + + " }\n" + + " }\n" + + " }\n" + + "}", // <1> + XContentType.JSON); + // 
end::put-mapping-request-source + + // tag::put-mapping-request-timeout + request.timeout(TimeValue.timeValueMinutes(2)); // <1> + request.timeout("2m"); // <2> + // end::put-mapping-request-timeout + // tag::put-mapping-request-masterTimeout + request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1> + request.masterNodeTimeout("1m"); // <2> + // end::put-mapping-request-masterTimeout + + // tag::put-mapping-execute + PutMappingResponse putMappingResponse = client.indices().putMapping(request); + // end::put-mapping-execute + + // tag::put-mapping-response + boolean acknowledged = putMappingResponse.isAcknowledged(); // <1> + // end::put-mapping-response + assertTrue(acknowledged); + } + } + + public void testPutMappingAsync() throws Exception { + final RestHighLevelClient client = highLevelClient(); + + { + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("twitter")); + assertTrue(createIndexResponse.isAcknowledged()); + } + + { + PutMappingRequest request = new PutMappingRequest("twitter").type("tweet"); + // tag::put-mapping-execute-async + client.indices().putMappingAsync(request, new ActionListener() { + @Override + public void onResponse(PutMappingResponse putMappingResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }); + // end::put-mapping-execute-async + + assertBusy(() -> { + // TODO Use Indices Exist API instead once it exists + Response response = client.getLowLevelClient().performRequest("HEAD", "twitter"); + assertTrue(RestStatus.OK.getStatus() == response.getStatusLine().getStatusCode()); + }); + } + } + + public void testOpenIndex() throws IOException { + RestHighLevelClient client = highLevelClient(); + + { + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index")); + assertTrue(createIndexResponse.isAcknowledged()); + } + + { + // tag::open-index-request + OpenIndexRequest request = new OpenIndexRequest("index"); // <1> + // end::open-index-request + + // tag::open-index-request-timeout + request.timeout(TimeValue.timeValueMinutes(2)); // <1> + request.timeout("2m"); // <2> + // end::open-index-request-timeout + // tag::open-index-request-masterTimeout + request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1> + request.masterNodeTimeout("1m"); // <2> + // end::open-index-request-masterTimeout + // tag::open-index-request-waitForActiveShards + request.waitForActiveShards(2); // <1> + request.waitForActiveShards(ActiveShardCount.DEFAULT); // <2> + // end::open-index-request-waitForActiveShards + + + // tag::open-index-request-indicesOptions + request.indicesOptions(IndicesOptions.strictExpandOpen()); // <1> + // end::open-index-request-indicesOptions + + // tag::open-index-execute + OpenIndexResponse openIndexResponse = client.indices().open(request); + // end::open-index-execute + + // tag::open-index-response + boolean acknowledged = openIndexResponse.isAcknowledged(); // <1> + boolean shardsAcked = openIndexResponse.isShardsAcknowledged(); // <2> + // end::open-index-response + assertTrue(acknowledged); + assertTrue(shardsAcked); + + // tag::open-index-execute-async + client.indices().openAsync(request, new ActionListener() { + @Override + public void onResponse(OpenIndexResponse openIndexResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }); + // end::open-index-execute-async + } + + { + // tag::open-index-notfound + try { + OpenIndexRequest request = new OpenIndexRequest("does_not_exist"); 
+ client.indices().open(request); + } catch (ElasticsearchException exception) { + if (exception.status() == RestStatus.BAD_REQUEST) { + // <1> + } + } + // end::open-index-notfound + } + } + + public void testCloseIndex() throws IOException { + RestHighLevelClient client = highLevelClient(); + + { + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index")); + assertTrue(createIndexResponse.isAcknowledged()); + } + + { + // tag::close-index-request + CloseIndexRequest request = new CloseIndexRequest("index"); // <1> + // end::close-index-request + + // tag::close-index-request-timeout + request.timeout(TimeValue.timeValueMinutes(2)); // <1> + request.timeout("2m"); // <2> + // end::close-index-request-timeout + // tag::close-index-request-masterTimeout + request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1> + request.masterNodeTimeout("1m"); // <2> + // end::close-index-request-masterTimeout + + // tag::close-index-request-indicesOptions + request.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1> + // end::close-index-request-indicesOptions + + // tag::close-index-execute + CloseIndexResponse closeIndexResponse = client.indices().close(request); + // end::close-index-execute + + // tag::close-index-response + boolean acknowledged = closeIndexResponse.isAcknowledged(); // <1> + // end::close-index-response + assertTrue(acknowledged); + + // tag::close-index-execute-async + client.indices().closeAsync(request, new ActionListener() { + @Override + public void onResponse(CloseIndexResponse closeIndexResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }); + // end::close-index-execute-async + } + + { + // tag::close-index-notfound + try { + CloseIndexRequest request = new CloseIndexRequest("does_not_exist"); + client.indices().close(request); + } catch (ElasticsearchException exception) { + if (exception.status() == RestStatus.BAD_REQUEST) { + // <1> + } + } + // end::close-index-notfound + } + } } diff --git a/distribution/build.gradle b/distribution/build.gradle index c6fc9d5b6946f..d322aa9c1ff12 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -83,7 +83,7 @@ ext.restTestExpansions = [ // we create the buildModules task above so the distribution subprojects can // depend on it, but we don't actually configure it until here so we can do a single // loop over modules to also setup cross task dependencies and increment our modules counter -project.rootProject.subprojects.findAll { it.path.startsWith(':modules:') }.each { Project module -> +project.rootProject.subprojects.findAll { it.parent.path == ':modules' }.each { Project module -> buildFullNotice { def defaultLicensesDir = new File(module.projectDir, 'licenses') if (defaultLicensesDir.exists()) { diff --git a/distribution/bwc/build.gradle b/distribution/bwc/build.gradle index 93f6ffe2c9b77..a9a7bd1e0a247 100644 --- a/distribution/bwc/build.gradle +++ b/distribution/bwc/build.gradle @@ -20,6 +20,7 @@ import org.apache.tools.ant.taskdefs.condition.Os import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.Version + import java.util.regex.Matcher /** @@ -118,29 +119,31 @@ if (project.hasProperty('bwcVersion')) { task buildBwcVersion(type: Exec) { dependsOn checkoutBwcBranch, writeBuildMetadata workingDir = checkoutDir + if (project.rootProject.ext.runtimeJavaVersion == JavaVersion.VERSION_1_8 && ["5.6", "6.0", "6.1"].contains(bwcBranch)) { + /* + * If runtime Java home is set to JDK 8 and we are 
building branches that are officially built with JDK 8, push this to JAVA_HOME for + * these builds. + */ + environment('JAVA_HOME', System.getenv('RUNTIME_JAVA_HOME')) + } if (Os.isFamily(Os.FAMILY_WINDOWS)) { executable 'cmd' args '/C', 'call', new File(checkoutDir, 'gradlew').toString() } else { - executable = new File(checkoutDir, 'gradlew').toString() + executable new File(checkoutDir, 'gradlew').toString() } - final ArrayList commandLineArgs = [ - ":distribution:deb:assemble", - ":distribution:rpm:assemble", - ":distribution:zip:assemble", - "-Dbuild.snapshot=${System.getProperty('build.snapshot') ?: 'true'}"] + args ":distribution:deb:assemble", ":distribution:rpm:assemble", ":distribution:zip:assemble", "-Dbuild.snapshot=${System.getProperty('build.snapshot') ?: 'true'}" final LogLevel logLevel = gradle.startParameter.logLevel if ([LogLevel.QUIET, LogLevel.WARN, LogLevel.INFO, LogLevel.DEBUG].contains(logLevel)) { - commandLineArgs << "--${logLevel.name().toLowerCase(Locale.ENGLISH)}" + args "--${logLevel.name().toLowerCase(Locale.ENGLISH)}" } final String showStacktraceName = gradle.startParameter.showStacktrace.name() assert ["INTERNAL_EXCEPTIONS", "ALWAYS", "ALWAYS_FULL"].contains(showStacktraceName) if (showStacktraceName.equals("ALWAYS")) { - commandLineArgs << "--stacktrace" + args "--stacktrace" } else if (showStacktraceName.equals("ALWAYS_FULL")) { - commandLineArgs << "--full-stacktrace" + args "--full-stacktrace" } - args commandLineArgs doLast { List missing = [bwcDeb, bwcRpm, bwcZip].grep { file -> false == file.exists() diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java index a8b7db48a7c1c..5675d3e80070f 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java @@ -646,9 +646,11 @@ private void installMetaPlugin(Terminal terminal, boolean isBatch, Path tmpRoot, Environment env, List deleteOnFailure) throws Exception { final MetaPluginInfo metaInfo = MetaPluginInfo.readFromProperties(tmpRoot); verifyPluginName(env.pluginsFile(), metaInfo.getName(), tmpRoot); + final Path destination = env.pluginsFile().resolve(metaInfo.getName()); deleteOnFailure.add(destination); terminal.println(VERBOSE, metaInfo.toString()); + final List pluginPaths = new ArrayList<>(); try (DirectoryStream paths = Files.newDirectoryStream(tmpRoot)) { // Extract bundled plugins path and validate plugin names @@ -665,19 +667,11 @@ private void installMetaPlugin(Terminal terminal, boolean isBatch, Path tmpRoot, for (Path plugin : pluginPaths) { final PluginInfo info = verify(terminal, plugin, isBatch, env); pluginInfos.add(info); - Path tmpBinDir = plugin.resolve("bin"); - if (Files.exists(tmpBinDir)) { - Path destBinDir = env.binFile().resolve(metaInfo.getName()); - deleteOnFailure.add(destBinDir); - installBin(info, tmpBinDir, destBinDir); - } - - Path tmpConfigDir = plugin.resolve("config"); - if (Files.exists(tmpConfigDir)) { - // some files may already exist, and we don't remove plugin config files on plugin removal, - // so any installed config files are left on failure too - Path destConfigDir = env.configFile().resolve(metaInfo.getName()); - installConfig(info, tmpConfigDir, destConfigDir); + installPluginSupportFiles(info, plugin, env.binFile().resolve(metaInfo.getName()), + 
env.configFile().resolve(metaInfo.getName()), deleteOnFailure); + // ensure the plugin dir within the tmpRoot has the correct name + if (plugin.getFileName().toString().equals(info.getName()) == false) { + Files.move(plugin, plugin.getParent().resolve(info.getName()), StandardCopyOption.ATOMIC_MOVE); } } movePlugin(tmpRoot, destination); @@ -693,7 +687,7 @@ private void installMetaPlugin(Terminal terminal, boolean isBatch, Path tmpRoot, /** * Installs the plugin from {@code tmpRoot} into the plugins dir. - * If the plugin has a bin dir and/or a config dir, those are copied. + * If the plugin has a bin dir and/or a config dir, those are moved. */ private void installPlugin(Terminal terminal, boolean isBatch, Path tmpRoot, Environment env, List deleteOnFailure) throws Exception { @@ -701,9 +695,20 @@ private void installPlugin(Terminal terminal, boolean isBatch, Path tmpRoot, final Path destination = env.pluginsFile().resolve(info.getName()); deleteOnFailure.add(destination); + installPluginSupportFiles(info, tmpRoot, env.binFile().resolve(info.getName()), + env.configFile().resolve(info.getName()), deleteOnFailure); + movePlugin(tmpRoot, destination); + if (info.requiresKeystore()) { + createKeystoreIfNeeded(terminal, env, info); + } + terminal.println("-> Installed " + info.getName()); + } + + /** Moves bin and config directories from the plugin if they exist */ + private void installPluginSupportFiles(PluginInfo info, Path tmpRoot, + Path destBinDir, Path destConfigDir, List deleteOnFailure) throws Exception { Path tmpBinDir = tmpRoot.resolve("bin"); if (Files.exists(tmpBinDir)) { - Path destBinDir = env.binFile().resolve(info.getName()); deleteOnFailure.add(destBinDir); installBin(info, tmpBinDir, destBinDir); } @@ -712,14 +717,8 @@ private void installPlugin(Terminal terminal, boolean isBatch, Path tmpRoot, if (Files.exists(tmpConfigDir)) { // some files may already exist, and we don't remove plugin config files on plugin removal, // so any installed config files are left on failure too - Path destConfigDir = env.configFile().resolve(info.getName()); installConfig(info, tmpConfigDir, destConfigDir); } - movePlugin(tmpRoot, destination); - if (info.requiresKeystore()) { - createKeystoreIfNeeded(terminal, env, info); - } - terminal.println("-> Installed " + info.getName()); } /** Moves the plugin directory into its final destination. 
**/ diff --git a/docs/java-rest/high-level/apis/close_index.asciidoc b/docs/java-rest/high-level/apis/close_index.asciidoc new file mode 100644 index 0000000000000..a4d0f6383532e --- /dev/null +++ b/docs/java-rest/high-level/apis/close_index.asciidoc @@ -0,0 +1,70 @@ +[[java-rest-high-close-index]] +=== Close Index API + +[[java-rest-high-close-index-request]] +==== Close Index Request + +A `CloseIndexRequest` requires an `index` argument: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[close-index-request] +-------------------------------------------------- +<1> The index to close + +==== Optional arguments +The following arguments can optionally be provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[close-index-request-timeout] +-------------------------------------------------- +<1> Timeout to wait for all the nodes to acknowledge that the index is closed +as a `TimeValue` +<2> Timeout to wait for all the nodes to acknowledge that the index is closed +as a `String` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[close-index-request-masterTimeout] +-------------------------------------------------- +<1> Timeout to connect to the master node as a `TimeValue` +<2> Timeout to connect to the master node as a `String` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[close-index-request-indicesOptions] +-------------------------------------------------- +<1> Setting `IndicesOptions` controls how unavailable indices are resolved and +how wildcard expressions are expanded + +[[java-rest-high-close-index-sync]] +==== Synchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[close-index-execute] +-------------------------------------------------- + +[[java-rest-high-close-index-async]] +==== Asynchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[close-index-execute-async] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of failure.
The raised exception is provided as an argument + +[[java-rest-high-close-index-response]] +==== Close Index Response + +The returned `CloseIndexResponse` allows retrieving information about the +executed operation as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[close-index-response] +-------------------------------------------------- +<1> Indicates whether all of the nodes have acknowledged the request diff --git a/docs/java-rest/high-level/apis/createindex.asciidoc b/docs/java-rest/high-level/apis/createindex.asciidoc index ebd9158e19387..bfc7794c8f9a0 100644 --- a/docs/java-rest/high-level/apis/createindex.asciidoc +++ b/docs/java-rest/high-level/apis/createindex.asciidoc @@ -48,7 +48,7 @@ The following arguments can optionally be provided: include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[create-index-request-timeout] -------------------------------------------------- <1> Timeout to wait for the all the nodes to acknowledge the index creation as a `TimeValue` -<2> Timeout to wait for the all the nodes to acknowledge the index creatiom as a `String` +<2> Timeout to wait for all the nodes to acknowledge the index creation as a `String` ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- @@ -61,8 +61,10 @@ include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[create-index-reque -------------------------------------------------- include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[create-index-request-waitForActiveShards] -------------------------------------------------- -<1> The number of active shard copies to wait for before proceeding with the operation, as an `int`. -<2> The number of active shard copies to wait for before proceeding with the operation, as an `ActiveShardCount`. +<1> The number of active shard copies to wait for before the create index API returns a +response, as an `int`. +<2> The number of active shard copies to wait for before the create index API returns a +response, as an `ActiveShardCount`.
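As a quick sketch of how these optional arguments combine on a single request (illustrative only, not part of the tagged documentation tests; it assumes a `RestHighLevelClient` named `client` and the same imports used by the snippets above):

["source","java"]
--------------------------------------------------
CreateIndexRequest request = new CreateIndexRequest("twitter");
request.timeout(TimeValue.timeValueMinutes(2));           // wait up to two minutes for the nodes to acknowledge
request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // wait up to one minute to connect to the master node
request.waitForActiveShards(ActiveShardCount.DEFAULT);    // wait for the default number of active shard copies
CreateIndexResponse createIndexResponse = client.indices().create(request);
--------------------------------------------------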
[[java-rest-high-create-index-sync]] ==== Synchronous Execution diff --git a/docs/java-rest/high-level/apis/index.asciidoc b/docs/java-rest/high-level/apis/index.asciidoc index 2312f28372060..f7367b6e8c26d 100644 --- a/docs/java-rest/high-level/apis/index.asciidoc +++ b/docs/java-rest/high-level/apis/index.asciidoc @@ -1,10 +1,25 @@ include::createindex.asciidoc[] + include::deleteindex.asciidoc[] + +include::open_index.asciidoc[] + +include::close_index.asciidoc[] + +include::putmapping.asciidoc[] + include::_index.asciidoc[] + include::get.asciidoc[] + include::delete.asciidoc[] + include::update.asciidoc[] + include::bulk.asciidoc[] + include::search.asciidoc[] + include::scroll.asciidoc[] + include::main.asciidoc[] diff --git a/docs/java-rest/high-level/apis/open_index.asciidoc b/docs/java-rest/high-level/apis/open_index.asciidoc new file mode 100644 index 0000000000000..a30e62123a814 --- /dev/null +++ b/docs/java-rest/high-level/apis/open_index.asciidoc @@ -0,0 +1,81 @@ +[[java-rest-high-open-index]] +=== Open Index API + +[[java-rest-high-open-index-request]] +==== Open Index Request + +An `OpenIndexRequest` requires an `index` argument: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-request] +-------------------------------------------------- +<1> The index to open + +==== Optional arguments +The following arguments can optionally be provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-request-timeout] +-------------------------------------------------- +<1> Timeout to wait for all the nodes to acknowledge that the index is opened +as a `TimeValue` +<2> Timeout to wait for all the nodes to acknowledge that the index is opened +as a `String` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-request-masterTimeout] +-------------------------------------------------- +<1> Timeout to connect to the master node as a `TimeValue` +<2> Timeout to connect to the master node as a `String` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-request-waitForActiveShards] +-------------------------------------------------- +<1> The number of active shard copies to wait for before the open index API +returns a response, as an `int`. +<2> The number of active shard copies to wait for before the open index API +returns a response, as an `ActiveShardCount`.
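Putting these together, a minimal synchronous sketch might look as follows (illustrative only, assuming a `RestHighLevelClient` named `client` as in the snippets above):

["source","java"]
--------------------------------------------------
OpenIndexRequest request = new OpenIndexRequest("index");
request.timeout("2m");                                 // wait up to two minutes for acknowledgement
request.waitForActiveShards(ActiveShardCount.DEFAULT); // wait for the default number of active shard copies
OpenIndexResponse openIndexResponse = client.indices().open(request);
boolean acknowledged = openIndexResponse.isAcknowledged();      // all nodes acknowledged the request
boolean shardsAcked = openIndexResponse.isShardsAcknowledged(); // enough shard copies started before timing out
--------------------------------------------------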
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-request-indicesOptions] +-------------------------------------------------- +<1> Setting `IndicesOptions` controls how unavailable indices are resolved and +how wildcard expressions are expanded + +[[java-rest-high-open-index-sync]] +==== Synchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-execute] +-------------------------------------------------- + +[[java-rest-high-open-index-async]] +==== Asynchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-execute-async] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of failure. The raised exception is provided as an argument + +[[java-rest-high-open-index-response]] +==== Open Index Response + +The returned `OpenIndexResponse` allows to retrieve information about the +executed operation as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-response] +-------------------------------------------------- +<1> Indicates whether all of the nodes have acknowledged the request +<2> Indicates whether the requisite number of shard copies were started for +each shard in the index before timing out diff --git a/docs/java-rest/high-level/apis/putmapping.asciidoc b/docs/java-rest/high-level/apis/putmapping.asciidoc new file mode 100644 index 0000000000000..57b8ec8964a9a --- /dev/null +++ b/docs/java-rest/high-level/apis/putmapping.asciidoc @@ -0,0 +1,71 @@ +[[java-rest-high-put-mapping]] +=== Put Mapping API + +[[java-rest-high-put-mapping-request]] +==== Put Mapping Request + +A `PutMappingRequest` requires an `index` argument, and a type: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-mapping-request] +-------------------------------------------------- +<1> The index to add the mapping to +<2> The type to create (or update) + +==== Mapping source +A description of the fields to create on the mapping; if not defined, the mapping will default to empty. 
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-mapping-request-source] +-------------------------------------------------- +<1> The mapping source + +==== Optional arguments +The following arguments can optionally be provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-mapping-request-timeout] +-------------------------------------------------- +<1> Timeout to wait for the all the nodes to acknowledge the index creation as a `TimeValue` +<2> Timeout to wait for the all the nodes to acknowledge the index creation as a `String` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-mapping-request-masterTimeout] +-------------------------------------------------- +<1> Timeout to connect to the master node as a `TimeValue` +<2> Timeout to connect to the master node as a `String` + +[[java-rest-high-put-mapping-sync]] +==== Synchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-mapping-execute] +-------------------------------------------------- + +[[java-rest-high-put-mapping-async]] +==== Asynchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-mapping-execute-async] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of failure. The raised exception is provided as an argument + +[[java-rest-high-put-mapping-response]] +==== Put Mapping Response + +The returned `PutMappingResponse` allows to retrieve information about the executed + operation as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-mapping-response] +-------------------------------------------------- +<1> Indicates whether all of the nodes have acknowledged the request diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 7a6b55619f77a..aede4789f4dec 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -6,6 +6,9 @@ The Java High Level REST Client supports the following APIs: Indices APIs:: * <> * <> +* <> +* <> +* <> Single document APIs:: * <> diff --git a/docs/painless/painless-getting-started.asciidoc b/docs/painless/painless-getting-started.asciidoc index 155b5f272b426..7898631416b6b 100644 --- a/docs/painless/painless-getting-started.asciidoc +++ b/docs/painless/painless-getting-started.asciidoc @@ -320,7 +320,7 @@ POST hockey/player/_update_by_query Note: all of the `_update_by_query` examples above could really do with a `query` to limit the data that they pull back. 
While you *could* use a -See {ref}/query-dsl-script-query.html[script query] it wouldn't be as efficient +{ref}/query-dsl-script-query.html[script query] it wouldn't be as efficient as using any other query because script queries aren't able to use the inverted index to limit the documents that they have to check. diff --git a/docs/reference/aggregations/bucket/composite-aggregation.asciidoc b/docs/reference/aggregations/bucket/composite-aggregation.asciidoc index 2e4b9a1101108..be18689bfddc4 100644 --- a/docs/reference/aggregations/bucket/composite-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/composite-aggregation.asciidoc @@ -224,8 +224,40 @@ Time values can also be specified via abbreviations supported by <<time-units,time units>> parsing. + +A `format` can also be specified for the date strings returned as bucket keys: + +[source,js] +-------------------------------------------------- +GET /_search +{ + "aggs" : { + "my_buckets": { + "composite" : { + "sources" : [ + { + "date": { + "date_histogram" : { + "field": "timestamp", + "interval": "1d", + "format": "yyyy-MM-dd" <1> + } + } + } + ] + } + } + } +} +-------------------------------------------------- +// CONSOLE + +<1> Supports expressive date <> + +====== Time Zone Date-times are stored in Elasticsearch in UTC. By default, all bucketing and rounding is also done in UTC. The `time_zone` parameter can be used to indicate diff --git a/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc b/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc index e5966e56b35dd..0c19bf172bbf0 100644 --- a/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc @@ -1,5 +1,5 @@ [[search-aggregations-metrics-top-hits-aggregation]] -=== Top hits Aggregation +=== Top Hits Aggregation A `top_hits` metric aggregator keeps track of the most relevant document being aggregated. This aggregator is intended to be used as a sub aggregator, so that the top matching documents can be aggregated per bucket. diff --git a/docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc b/docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc index 5dc1b80d4adda..4cc532c99c5d2 100644 --- a/docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc @@ -41,7 +41,7 @@ for more details) |Required | details)|Optional |`skip` |=== -The following snippet only retains buckets where the total sales for the month is more than 400: +The following snippet only retains buckets where the total sales for the month is more than 200: [source,js] -------------------------------------------------- diff --git a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc index 1ce44b6028db8..cb976601fdcbe 100644 --- a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc @@ -171,7 +171,7 @@ The `basque` analyzer could be reimplemented as a `custom` analyzer as follows: [source,js] ---------------------------------------------------- -PUT /armenian_example +PUT /basque_example { "settings": { "analysis": { @@ -536,7 +536,7 @@ The `dutch` analyzer could be reimplemented as a `custom` analyzer as follows: [source,js] ---------------------------------------------------- -PUT /detch_example +PUT /dutch_example { "settings": { "analysis": { @@ -1554,7 +1554,7 @@ The `swedish` analyzer could be reimplemented as a `custom` analyzer as follows: [source,js] ---------------------------------------------------- -PUT /swidish_example +PUT /swedish_example { "settings": { "analysis": { diff --git a/docs/reference/index-modules/translog.asciidoc b/docs/reference/index-modules/translog.asciidoc index
31d529b6c4436..b1eb36e346d9f 100644 --- a/docs/reference/index-modules/translog.asciidoc +++ b/docs/reference/index-modules/translog.asciidoc @@ -1,41 +1,44 @@ [[index-modules-translog]] == Translog -Changes to Lucene are only persisted to disk during a Lucene commit, -which is a relatively heavy operation and so cannot be performed after every -index or delete operation. Changes that happen after one commit and before another -will be lost in the event of process exit or HW failure. - -To prevent this data loss, each shard has a _transaction log_ or write ahead -log associated with it. Any index or delete operation is written to the -translog after being processed by the internal Lucene index. - -In the event of a crash, recent transactions can be replayed from the -transaction log when the shard recovers. +Changes to Lucene are only persisted to disk during a Lucene commit, which is a +relatively expensive operation and so cannot be performed after every index or +delete operation. Changes that happen after one commit and before another will +be removed from the index by Lucene in the event of process exit or hardware +failure. + +Because Lucene commits are too expensive to perform on every individual change, +each shard copy also has a _transaction log_ known as its _translog_ associated +with it. All index and delete operations are written to the translog after +being processed by the internal Lucene index but before they are acknowledged. +In the event of a crash, recent transactions that have been acknowledged but +not yet included in the last Lucene commit can instead be recovered from the +translog when the shard recovers. An Elasticsearch flush is the process of performing a Lucene commit and -starting a new translog. It is done automatically in the background in order -to make sure the transaction log doesn't grow too large, which would make +starting a new translog. Flushes are performed automatically in the background +in order to make sure the translog doesn't grow too large, which would make replaying its operations take a considerable amount of time during recovery. -It is also exposed through an API, though its rarely needed to be performed -manually. +The ability to perform a flush manually is also exposed through an API, +although this is rarely needed. [float] === Translog settings -The data in the transaction log is only persisted to disk when the translog is +The data in the translog is only persisted to disk when the translog is ++fsync++ed and committed. In the event of hardware failure, any data written since the previous translog commit will be lost. -By default, Elasticsearch ++fsync++s and commits the translog every 5 seconds if `index.translog.durability` is set -to `async` or if set to `request` (default) at the end of every <>, <>, -<>, or <> request. In fact, Elasticsearch -will only report success of an index, delete, update, or bulk request to the -client after the transaction log has been successfully ++fsync++ed and committed -on the primary and on every allocated replica. +By default, Elasticsearch ++fsync++s and commits the translog every 5 seconds +if `index.translog.durability` is set to `async` or if set to `request` +(default) at the end of every <>, <>, +<>, or <> request. More precisely, if set +to `request`, Elasticsearch will only report success of an index, delete, +update, or bulk request to the client after the translog has been successfully +++fsync++ed and committed on the primary and on every allocated replica. 
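To illustrate the trade-off, the durability mode can be chosen per index at creation time; the sketch below (index name and values are illustrative, assuming a `RestHighLevelClient` named `client`) trades a small window of safety for throughput by opting into `async` durability:

["source","java"]
--------------------------------------------------
CreateIndexRequest request = new CreateIndexRequest("logs");
request.settings(Settings.builder()
    .put("index.translog.durability", "async") // fsync and commit in the background ...
    .put("index.translog.sync_interval", "5s") // ... every five seconds
);
CreateIndexResponse createIndexResponse = client.indices().create(request);
--------------------------------------------------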
-The following <> per-index settings -control the behaviour of the transaction log: +The following <> per-index +settings control the behaviour of the translog: `index.translog.sync_interval`:: @@ -64,17 +67,20 @@ update, or bulk request. This setting accepts the following parameters: `index.translog.flush_threshold_size`:: -The translog stores all operations that are not yet safely persisted in Lucene (i.e., are -not part of a lucene commit point). Although these operations are available for reads, they will -need to be reindexed if the shard was to shutdown and has to be recovered. This settings controls -the maximum total size of these operations, to prevent recoveries from taking too long. Once the -maximum size has been reached a flush will happen, generating a new Lucene commit. Defaults to `512mb`. +The translog stores all operations that are not yet safely persisted in Lucene +(i.e., are not part of a Lucene commit point). Although these operations are +available for reads, they will need to be reindexed if the shard were to +shut down and had to be recovered. This setting controls the maximum total size +of these operations, to prevent recoveries from taking too long. Once the +maximum size has been reached a flush will happen, generating a new Lucene +commit point. Defaults to `512mb`. `index.translog.retention.size`:: -The total size of translog files to keep. Keeping more translog files increases the chance of performing -an operation based sync when recovering replicas. If the translog files are not sufficient, replica recovery -will fall back to a file based sync. Defaults to `512mb` +The total size of translog files to keep. Keeping more translog files increases +the chance of performing an operation based sync when recovering replicas. If +the translog files are not sufficient, replica recovery will fall back to a +file based sync. Defaults to `512mb`. `index.translog.retention.age`:: @@ -86,10 +92,14 @@ The maximum duration for which translog files will be kept. Defaults to `12h`. [[corrupt-translog-truncation]] === What to do if the translog becomes corrupted? -In some cases (a bad drive, user error) the translog can become corrupted. When -this corruption is detected by Elasticsearch due to mismatching checksums, -Elasticsearch will fail the shard and refuse to allocate that copy of the data -to the node, recovering from a replica if available. +In some cases (a bad drive, user error) the translog on a shard copy can become +corrupted. When this corruption is detected by Elasticsearch due to mismatching +checksums, Elasticsearch will fail that shard copy and refuse to use that copy +of the data. If there are other copies of the shard available then +Elasticsearch will automatically recover from one of them using the normal +shard allocation and recovery mechanism. In particular, if the corrupt shard +copy was the primary when the corruption was detected then one of its replicas +will be promoted in its place. If there is no copy of the data from which Elasticsearch can recover successfully, a user may want to recover the data that is part of the shard at
`short`:: A signed 16-bit integer with a minimum value of +-32,768+ and a maximum value of +32,767+. `byte`:: A signed 8-bit integer with a minimum value of +-128+ and a maximum value of +127+. -`double`:: A double-precision 64-bit IEEE 754 floating point number. -`float`:: A single-precision 32-bit IEEE 754 floating point number. -`half_float`:: A half-precision 16-bit IEEE 754 floating point number. +`double`:: A double-precision 64-bit IEEE 754 floating point number, restricted to finite values. +`float`:: A single-precision 32-bit IEEE 754 floating point number, restricted to finite values. +`half_float`:: A half-precision 16-bit IEEE 754 floating point number, restricted to finite values. `scaled_float`:: A floating point number that is backed by a `long`, scaled by a fixed `double` scaling factor. Below is an example of configuring a mapping with numeric fields: diff --git a/docs/reference/mapping/types/range.asciidoc b/docs/reference/mapping/types/range.asciidoc index 76aba68277121..0ef4a463c9bb1 100644 --- a/docs/reference/mapping/types/range.asciidoc +++ b/docs/reference/mapping/types/range.asciidoc @@ -47,18 +47,16 @@ PUT range_index/_doc/1 -------------------------------------------------- //CONSOLE -The following is an example of a `date_range` query over the `date_range` field named "time_frame". +The following is an example of a <> on the `integer_range` field named "expected_attendees". [source,js] -------------------------------------------------- -POST range_index/_search +GET range_index/_search { "query" : { - "range" : { - "time_frame" : { <5> - "gte" : "2015-10-31", - "lte" : "2015-11-01", - "relation" : "within" <6> + "term" : { + "expected_attendees" : { + "value": 12 } } } @@ -104,6 +102,27 @@ The result produced by the above query. -------------------------------------------------- // TESTRESPONSE[s/"took": 13/"took" : $body.took/] + +The following is an example of a `date_range` query over the `date_range` field named "time_frame". + +[source,js] +-------------------------------------------------- +GET range_index/_search +{ + "query" : { + "range" : { + "time_frame" : { <5> + "gte" : "2015-10-31", + "lte" : "2015-11-01", + "relation" : "within" <6> + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:range_index] + <1> `date_range` types accept the same field parameters defined by the <> type. <2> Example indexing a meeting with 10 to 20 attendees. <3> Date ranges accept the same format as described in <>. @@ -112,6 +131,44 @@ The result produced by the above query. <6> Range queries over range <> support a `relation` parameter which can be one of `WITHIN`, `CONTAINS`, `INTERSECTS` (default). 
+This query produces a similar result: + +[source,js] +-------------------------------------------------- +{ + "took": 13, + "timed_out": false, + "_shards" : { + "total": 2, + "successful": 2, + "skipped" : 0, + "failed": 0 + }, + "hits" : { + "total" : 1, + "max_score" : 1.0, + "hits" : [ + { + "_index" : "range_index", + "_type" : "_doc", + "_id" : "1", + "_score" : 1.0, + "_source" : { + "expected_attendees" : { + "gte" : 10, "lte" : 20 + }, + "time_frame" : { + "gte" : "2015-10-31 12:00:00", "lte" : "2015-11-01" + } + } + } + ] + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"took": 13/"took" : $body.took/] + + [[range-params]] ==== Parameters for range fields diff --git a/docs/reference/migration/migrate_7_0/mappings.asciidoc b/docs/reference/migration/migrate_7_0/mappings.asciidoc index 215282c49d7bd..ece9cac5962ab 100644 --- a/docs/reference/migration/migrate_7_0/mappings.asciidoc +++ b/docs/reference/migration/migrate_7_0/mappings.asciidoc @@ -13,4 +13,8 @@ The `index_options` field for numeric fields has been deprecated in 6 and has n To safeguard against out of memory errors, the number of nested json objects within a single document across all fields has been limited to 10000. This default limit can be changed with -the index setting `index.mapping.nested_objects.limit`. \ No newline at end of file +the index setting `index.mapping.nested_objects.limit`. + +==== The `update_all_types` option has been removed + +This option is useless now that all indices have at most one type. diff --git a/docs/reference/modules/gateway.asciidoc b/docs/reference/modules/gateway.asciidoc index 0af0d31fba2c5..76e0840793996 100644 --- a/docs/reference/modules/gateway.asciidoc +++ b/docs/reference/modules/gateway.asciidoc @@ -48,3 +48,12 @@ as long as the following conditions are met: Recover as long as this many data nodes have joined the cluster. NOTE: These settings only take effect on a full cluster restart. + +=== Dangling indices + +When a node joins the cluster, any shards stored in its local data +directory which do not already exist in the cluster will be imported into the +cluster. This functionality is intended as a best effort to help users who +lose all master nodes. If a new master node is started which is unaware of +the other indices in the cluster, adding the old nodes will cause the old +indices to be imported, instead of being deleted. diff --git a/docs/reference/query-dsl/query-string-syntax.asciidoc b/docs/reference/query-dsl/query-string-syntax.asciidoc index 8a7b394b2e870..c73543c99a1d9 100644 --- a/docs/reference/query-dsl/query-string-syntax.asciidoc +++ b/docs/reference/query-dsl/query-string-syntax.asciidoc @@ -56,7 +56,7 @@ match the query string `"a* b* c*"`. [WARNING] ======= Pure wildcards `\*` are rewritten to <> queries for efficiency. -As a consequence, the wildcard `"field:*"` would match documents with an emtpy value +As a consequence, the wildcard `"field:*"` would match documents with an empty value like the following: ``` { diff --git a/docs/reference/query-dsl/term-query.asciidoc b/docs/reference/query-dsl/term-query.asciidoc index 4b668203a33ad..f1224f33ca7d4 100644 --- a/docs/reference/query-dsl/term-query.asciidoc +++ b/docs/reference/query-dsl/term-query.asciidoc @@ -51,6 +51,8 @@ GET _search as the query clause for `normal`. <2> The `normal` clause has the default neutral boost of `1.0`. +A `term` query can also match against <>. + .Why doesn't the `term` query match my document?
************************************************** diff --git a/docs/reference/search/request/highlighting.asciidoc b/docs/reference/search/request/highlighting.asciidoc index fd680bb6d6c2d..4552366de9800 100644 --- a/docs/reference/search/request/highlighting.asciidoc +++ b/docs/reference/search/request/highlighting.asciidoc @@ -24,7 +24,7 @@ GET /_search }, "highlight" : { "fields" : { - "comment" : {} + "content" : {} } } } diff --git a/docs/reference/search/request/rescore.asciidoc b/docs/reference/search/request/rescore.asciidoc index 204960544a688..6e1bb2a9e6ce2 100644 --- a/docs/reference/search/request/rescore.asciidoc +++ b/docs/reference/search/request/rescore.asciidoc @@ -15,7 +15,8 @@ Currently the rescore API has only one implementation: the query rescorer, which uses a query to tweak the scoring. In the future, alternative rescorers may be made available, for example, a pair-wise rescorer. -NOTE: the `rescore` phase is not executed when <> is used. +NOTE: An error will be thrown if an explicit <> (other than `_score`) +is provided with a `rescore` query. NOTE: when exposing pagination to your users, you should not change `window_size` as you step through each page (by passing different diff --git a/libs/build.gradle b/libs/build.gradle new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/libs/elasticsearch-core/src/main/eclipse-build.gradle b/libs/elasticsearch-core/src/main/eclipse-build.gradle new file mode 100644 index 0000000000000..9c84a4d6bd84b --- /dev/null +++ b/libs/elasticsearch-core/src/main/eclipse-build.gradle @@ -0,0 +1,2 @@ +// this is just shell gradle file for eclipse to have separate projects for elasticsearch-core src and tests +apply from: '../../build.gradle' diff --git a/libs/elasticsearch-core/src/test/eclipse-build.gradle b/libs/elasticsearch-core/src/test/eclipse-build.gradle new file mode 100644 index 0000000000000..f43f019941bb2 --- /dev/null +++ b/libs/elasticsearch-core/src/test/eclipse-build.gradle @@ -0,0 +1,6 @@ +// this is just shell gradle file for eclipse to have separate projects for elasticsearch-core src and tests +apply from: '../../build.gradle' + +dependencies { + testCompile project(':libs:elasticsearch-core') +} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/AbstractNioChannel.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/AbstractNioChannel.java index 8285fef6d3985..14e2365eb7e82 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/AbstractNioChannel.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/AbstractNioChannel.java @@ -26,7 +26,6 @@ import java.nio.channels.SelectableChannel; import java.nio.channels.SelectionKey; import java.util.concurrent.CompletableFuture; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; /** @@ -48,9 +47,6 @@ public abstract class AbstractNioChannel implements NioChannel { final S socketChannel; - // This indicates if the channel has been scheduled to be closed. Read the closeFuture to determine if - // the channel close process has completed. - final AtomicBoolean isClosing = new AtomicBoolean(false); private final InetSocketAddress localAddress; private final CompletableFuture closeContext = new CompletableFuture<>(); @@ -73,21 +69,6 @@ public InetSocketAddress getLocalAddress() { return localAddress; } - /** - * Schedules a channel to be closed by the selector event loop with which it is registered. - *
<p>
- * If the channel is open and the state can be transitioned to closed, the close operation will - * be scheduled with the event loop. - *
<p>
- * If the channel is already set to closed, it is assumed that it is already scheduled to be closed. - */ - @Override - public void close() { - if (isClosing.compareAndSet(false, true)) { - selector.queueChannelClose(this); - } - } - /** * Closes the channel synchronously. This method should only be called from the selector thread. *
<p>
@@ -95,11 +76,10 @@ public void close() { */ @Override public void closeFromSelector() throws IOException { - assert selector.isOnCurrentThread() : "Should only call from selector thread"; - isClosing.set(true); + selector.assertOnSelectorThread(); if (closeContext.isDone() == false) { try { - closeRawChannel(); + socketChannel.close(); closeContext.complete(null); } catch (IOException e) { closeContext.completeExceptionally(e); @@ -139,13 +119,13 @@ public void addCloseListener(BiConsumer listener) { closeContext.whenComplete(listener); } + @Override + public void close() { + getContext().closeChannel(); + } + // Package visibility for testing void setSelectionKey(SelectionKey selectionKey) { this.selectionKey = selectionKey; } - // Package visibility for testing - - void closeRawChannel() throws IOException { - socketChannel.close(); - } } diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/AcceptorEventHandler.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/AcceptorEventHandler.java index a5727d9ef597a..eb5194f21ef3b 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/AcceptorEventHandler.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/AcceptorEventHandler.java @@ -67,7 +67,7 @@ protected void acceptChannel(NioServerSocketChannel nioServerChannel) throws IOE ChannelFactory channelFactory = nioServerChannel.getChannelFactory(); SocketSelector selector = selectorSupplier.get(); NioSocketChannel nioSocketChannel = channelFactory.acceptNioChannel(nioServerChannel, selector); - nioServerChannel.getAcceptContext().accept(nioSocketChannel); + nioServerChannel.getContext().acceptChannel(nioSocketChannel); } /** diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesChannelContext.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesChannelContext.java new file mode 100644 index 0000000000000..5d77675aa4819 --- /dev/null +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesChannelContext.java @@ -0,0 +1,166 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.nio; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; +import java.util.LinkedList; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.BiConsumer; + +public class BytesChannelContext extends SocketChannelContext { + + private final ReadConsumer readConsumer; + private final InboundChannelBuffer channelBuffer; + private final LinkedList queued = new LinkedList<>(); + private final AtomicBoolean isClosing = new AtomicBoolean(false); + + public BytesChannelContext(NioSocketChannel channel, BiConsumer exceptionHandler, + ReadConsumer readConsumer, InboundChannelBuffer channelBuffer) { + super(channel, exceptionHandler); + this.readConsumer = readConsumer; + this.channelBuffer = channelBuffer; + } + + @Override + public int read() throws IOException { + if (channelBuffer.getRemaining() == 0) { + // Requiring one additional byte will ensure that a new page is allocated. + channelBuffer.ensureCapacity(channelBuffer.getCapacity() + 1); + } + + int bytesRead = readFromChannel(channelBuffer.sliceBuffersFrom(channelBuffer.getIndex())); + + if (bytesRead == 0) { + return 0; + } + + channelBuffer.incrementIndex(bytesRead); + + int bytesConsumed = Integer.MAX_VALUE; + while (bytesConsumed > 0 && channelBuffer.getIndex() > 0) { + bytesConsumed = readConsumer.consumeReads(channelBuffer); + channelBuffer.release(bytesConsumed); + } + + return bytesRead; + } + + @Override + public void sendMessage(ByteBuffer[] buffers, BiConsumer listener) { + if (isClosing.get()) { + listener.accept(null, new ClosedChannelException()); + return; + } + + BytesWriteOperation writeOperation = new BytesWriteOperation(channel, buffers, listener); + SocketSelector selector = channel.getSelector(); + if (selector.isOnCurrentThread() == false) { + selector.queueWrite(writeOperation); + return; + } + + selector.queueWriteInChannelBuffer(writeOperation); + } + + @Override + public void queueWriteOperation(WriteOperation writeOperation) { + channel.getSelector().assertOnSelectorThread(); + queued.add((BytesWriteOperation) writeOperation); + } + + @Override + public void flushChannel() throws IOException { + channel.getSelector().assertOnSelectorThread(); + int ops = queued.size(); + if (ops == 1) { + singleFlush(queued.pop()); + } else if (ops > 1) { + multiFlush(); + } + } + + @Override + public boolean hasQueuedWriteOps() { + channel.getSelector().assertOnSelectorThread(); + return queued.isEmpty() == false; + } + + @Override + public void closeChannel() { + if (isClosing.compareAndSet(false, true)) { + channel.getSelector().queueChannelClose(channel); + } + } + + @Override + public boolean selectorShouldClose() { + return isPeerClosed() || hasIOException() || isClosing.get(); + } + + @Override + public void closeFromSelector() throws IOException { + channel.getSelector().assertOnSelectorThread(); + if (channel.isOpen()) { + IOException channelCloseException = null; + try { + channel.closeFromSelector(); + } catch (IOException e) { + channelCloseException = e; + } + // Set to true in order to reject new writes before queuing with selector + isClosing.set(true); + channelBuffer.close(); + for (BytesWriteOperation op : queued) { + channel.getSelector().executeFailedListener(op.getListener(), new ClosedChannelException()); + } + queued.clear(); + if (channelCloseException != null) { + throw channelCloseException; + } + } + } + + private void singleFlush(BytesWriteOperation headOp) throws IOException { + try { + int 
written = flushToChannel(headOp.getBuffersToWrite()); + headOp.incrementIndex(written); + } catch (IOException e) { + channel.getSelector().executeFailedListener(headOp.getListener(), e); + throw e; + } + + if (headOp.isFullyFlushed()) { + channel.getSelector().executeListener(headOp.getListener(), null); + } else { + queued.push(headOp); + } + } + + private void multiFlush() throws IOException { + boolean lastOpCompleted = true; + while (lastOpCompleted && queued.isEmpty() == false) { + BytesWriteOperation op = queued.pop(); + singleFlush(op); + lastOpCompleted = op.isFullyFlushed(); + } + } +} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesReadContext.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesReadContext.java deleted file mode 100644 index eeda147be6c70..0000000000000 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesReadContext.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.nio; - -import java.io.IOException; - -public class BytesReadContext implements ReadContext { - - private final NioSocketChannel channel; - private final ReadConsumer readConsumer; - private final InboundChannelBuffer channelBuffer; - - public BytesReadContext(NioSocketChannel channel, ReadConsumer readConsumer, InboundChannelBuffer channelBuffer) { - this.channel = channel; - this.channelBuffer = channelBuffer; - this.readConsumer = readConsumer; - } - - @Override - public int read() throws IOException { - if (channelBuffer.getRemaining() == 0) { - // Requiring one additional byte will ensure that a new page is allocated. - channelBuffer.ensureCapacity(channelBuffer.getCapacity() + 1); - } - - int bytesRead = channel.read(channelBuffer.sliceBuffersFrom(channelBuffer.getIndex())); - - if (bytesRead == -1) { - return bytesRead; - } - - channelBuffer.incrementIndex(bytesRead); - - int bytesConsumed = Integer.MAX_VALUE; - while (bytesConsumed > 0) { - bytesConsumed = readConsumer.consumeReads(channelBuffer); - channelBuffer.release(bytesConsumed); - } - - return bytesRead; - } - - @Override - public void close() { - channelBuffer.close(); - } -} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteContext.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteContext.java deleted file mode 100644 index c2816deef5343..0000000000000 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteContext.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.nio; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.channels.ClosedChannelException; -import java.util.LinkedList; -import java.util.function.BiConsumer; - -public class BytesWriteContext implements WriteContext { - - private final NioSocketChannel channel; - private final LinkedList queued = new LinkedList<>(); - - public BytesWriteContext(NioSocketChannel channel) { - this.channel = channel; - } - - @Override - public void sendMessage(Object message, BiConsumer listener) { - ByteBuffer[] buffers = (ByteBuffer[]) message; - if (channel.isWritable() == false) { - listener.accept(null, new ClosedChannelException()); - return; - } - - WriteOperation writeOperation = new WriteOperation(channel, buffers, listener); - SocketSelector selector = channel.getSelector(); - if (selector.isOnCurrentThread() == false) { - selector.queueWrite(writeOperation); - return; - } - - // TODO: Eval if we will allow writes from sendMessage - selector.queueWriteInChannelBuffer(writeOperation); - } - - @Override - public void queueWriteOperations(WriteOperation writeOperation) { - assert channel.getSelector().isOnCurrentThread() : "Must be on selector thread to queue writes"; - queued.add(writeOperation); - } - - @Override - public void flushChannel() throws IOException { - assert channel.getSelector().isOnCurrentThread() : "Must be on selector thread to flush writes"; - int ops = queued.size(); - if (ops == 1) { - singleFlush(queued.pop()); - } else if (ops > 1) { - multiFlush(); - } - } - - @Override - public boolean hasQueuedWriteOps() { - assert channel.getSelector().isOnCurrentThread() : "Must be on selector thread to access queued writes"; - return queued.isEmpty() == false; - } - - @Override - public void clearQueuedWriteOps(Exception e) { - assert channel.getSelector().isOnCurrentThread() : "Must be on selector thread to clear queued writes"; - for (WriteOperation op : queued) { - channel.getSelector().executeFailedListener(op.getListener(), e); - } - queued.clear(); - } - - private void singleFlush(WriteOperation headOp) throws IOException { - try { - headOp.flush(); - } catch (IOException e) { - channel.getSelector().executeFailedListener(headOp.getListener(), e); - throw e; - } - - if (headOp.isFullyFlushed()) { - channel.getSelector().executeListener(headOp.getListener(), null); - } else { - queued.push(headOp); - } - } - - private void multiFlush() throws IOException { - boolean lastOpCompleted = true; - while (lastOpCompleted && queued.isEmpty() == false) { - WriteOperation op = queued.pop(); - singleFlush(op); - lastOpCompleted = op.isFullyFlushed(); - } - } -} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteOperation.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteOperation.java new file mode 100644 index 0000000000000..14e8cace66d09 --- /dev/null +++ 
b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteOperation.java @@ -0,0 +1,88 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.nio; + +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.function.BiConsumer; + +public class BytesWriteOperation implements WriteOperation { + + private final NioSocketChannel channel; + private final BiConsumer listener; + private final ByteBuffer[] buffers; + private final int[] offsets; + private final int length; + private int internalIndex; + + public BytesWriteOperation(NioSocketChannel channel, ByteBuffer[] buffers, BiConsumer listener) { + this.channel = channel; + this.listener = listener; + this.buffers = buffers; + this.offsets = new int[buffers.length]; + int offset = 0; + for (int i = 0; i < buffers.length; i++) { + ByteBuffer buffer = buffers[i]; + offsets[i] = offset; + offset += buffer.remaining(); + } + length = offset; + } + + @Override + public BiConsumer getListener() { + return listener; + } + + @Override + public NioSocketChannel getChannel() { + return channel; + } + + public boolean isFullyFlushed() { + assert length >= internalIndex : "Should never have an index that is greater than the length [length=" + length + ", index=" + + internalIndex + "]"; + return internalIndex == length; + } + + public void incrementIndex(int delta) { + internalIndex += delta; + assert length >= internalIndex : "Should never increment index past length [length=" + length + ", post-increment index=" + + internalIndex + ", delta=" + delta + "]"; + } + + public ByteBuffer[] getBuffersToWrite() { + final int index = Arrays.binarySearch(offsets, internalIndex); + int offsetIndex = index < 0 ? (-(index + 1)) - 1 : index; + + ByteBuffer[] postIndexBuffers = new ByteBuffer[buffers.length - offsetIndex]; + + ByteBuffer firstBuffer = buffers[offsetIndex].duplicate(); + firstBuffer.position(internalIndex - offsets[offsetIndex]); + postIndexBuffers[0] = firstBuffer; + int j = 1; + for (int i = (offsetIndex + 1); i < buffers.length; ++i) { + postIndexBuffers[j++] = buffers[i].duplicate(); + } + + return postIndexBuffers; + } + +} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ChannelContext.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ChannelContext.java new file mode 100644 index 0000000000000..fa664484c1c59 --- /dev/null +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ChannelContext.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
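BytesWriteOperation (added above) tracks partial flushes with a flat byte index plus per-buffer offsets; getBuffersToWrite uses a binary search over those offsets to resume mid-buffer. A worked example under assumed inputs (the channel and listener would come from the caller, e.g. mocks in a test):

```java
import java.nio.ByteBuffer;
import java.util.function.BiConsumer;

final class PartialFlushExample {
    // With buffers of 10 and 20 bytes, offsets == {0, 10} and length == 30.
    static void demo(NioSocketChannel channel, BiConsumer<Void, Exception> listener) {
        ByteBuffer[] buffers = { ByteBuffer.allocate(10), ByteBuffer.allocate(20) };
        BytesWriteOperation op = new BytesWriteOperation(channel, buffers, listener);

        op.incrementIndex(15);                 // a flush wrote 15 of the 30 bytes
        assert op.isFullyFlushed() == false;

        ByteBuffer[] remaining = op.getBuffersToWrite();
        // Arrays.binarySearch(offsets, 15) misses past offset 10, so offsetIndex == 1:
        // remaining[0] is a duplicate of buffers[1] positioned at 15 - offsets[1] == 5.
        assert remaining.length == 1 && remaining[0].remaining() == 15;
    }
}
```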
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.nio; + +import java.io.IOException; + +public interface ChannelContext { + /** + * This method cleans up any context resources that need to be released when a channel is closed. It + * should only be called by the selector thread. + * + * @throws IOException during channel / context close + */ + void closeFromSelector() throws IOException; + + /** + * Schedules a channel to be closed by the selector event loop with which it is registered. + * + * If the channel is open and the state can be transitioned to closed, the close operation will + * be scheduled with the event loop. + * + * Depending on the underlying protocol of the channel, a close operation might simply close the socket + * channel or may involve reading and writing messages. + */ + void closeChannel(); + + void handleException(Exception e); +} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ChannelFactory.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ChannelFactory.java index d90927af8b91a..5fc3f46f998e6 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ChannelFactory.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ChannelFactory.java @@ -88,9 +88,7 @@ public ServerSocket openNioServerSocketChannel(InetSocketAddress address, Accept private Socket internalCreateChannel(SocketSelector selector, SocketChannel rawChannel) throws IOException { try { Socket channel = createChannel(selector, rawChannel); - assert channel.getReadContext() != null : "read context should have been set on channel"; - assert channel.getWriteContext() != null : "write context should have been set on channel"; - assert channel.getExceptionContext() != null : "exception handler should have been set on channel"; + assert channel.getContext() != null : "channel context should have been set on channel"; return channel; } catch (Exception e) { closeRawChannel(rawChannel, e); diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ESSelector.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ESSelector.java index ed566ffa7daf8..e923df4bfa9b8 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ESSelector.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ESSelector.java @@ -163,6 +163,11 @@ public boolean isOnCurrentThread() { return Thread.currentThread() == thread; } + public void assertOnSelectorThread() { + assert isOnCurrentThread() : "Must be on selector thread to perform this operation. Currently on thread [" + + Thread.currentThread().getName() + "]."; + } + void wakeup() { // TODO: Do we need the wakeup optimizations that some other libraries use? 
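The InboundChannelBuffer change just below adds an allocatingInstance() factory: a buffer whose pages are plain heap allocations with a no-op close, useful where no pooled page source exists (the tests later in this patch rely on it). A small sketch of its use:

```java
// Sketch only: page size is assumed to be the 16k used elsewhere in this patch.
InboundChannelBuffer buffer = InboundChannelBuffer.allocatingInstance();
buffer.ensureCapacity((1 << 14) + 1); // one byte past a full page forces a second heap page
```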
selector.wakeup(); diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/EventHandler.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/EventHandler.java index 42bc0555d509c..7cba9b998b311 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/EventHandler.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/EventHandler.java @@ -69,7 +69,7 @@ protected void uncaughtException(Exception exception) { */ protected void handleClose(NioChannel channel) { try { - channel.closeFromSelector(); + channel.getContext().closeFromSelector(); } catch (IOException e) { closeException(channel, e); } diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/InboundChannelBuffer.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/InboundChannelBuffer.java index 07b6b68908bd1..f671b39d4d61b 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/InboundChannelBuffer.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/InboundChannelBuffer.java @@ -59,6 +59,10 @@ public InboundChannelBuffer(Supplier pageSupplier) { ensureCapacity(PAGE_SIZE); } + public static InboundChannelBuffer allocatingInstance() { + return new InboundChannelBuffer(() -> new Page(ByteBuffer.allocate(PAGE_SIZE), () -> {})); + } + @Override public void close() { if (isClosed.compareAndSet(false, true)) { diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioChannel.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioChannel.java index 433ec204e8684..690e3d3b38bda 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioChannel.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioChannel.java @@ -44,6 +44,8 @@ public interface NioChannel { NetworkChannel getRawChannel(); + ChannelContext getContext(); + /** * Adds a close listener to the channel. Multiple close listeners can be added. There is no guarantee * about the order in which close listeners will be executed. If the channel is already closed, the diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioServerSocketChannel.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioServerSocketChannel.java index 8eb904dc74179..3d1748e413ac7 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioServerSocketChannel.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioServerSocketChannel.java @@ -21,12 +21,13 @@ import java.io.IOException; import java.nio.channels.ServerSocketChannel; -import java.util.function.Consumer; +import java.util.concurrent.atomic.AtomicBoolean; public class NioServerSocketChannel extends AbstractNioChannel { private final ChannelFactory channelFactory; - private Consumer acceptContext; + private ServerChannelContext context; + private final AtomicBoolean contextSet = new AtomicBoolean(false); public NioServerSocketChannel(ServerSocketChannel socketChannel, ChannelFactory channelFactory, AcceptingSelector selector) throws IOException { @@ -39,17 +40,22 @@ public NioServerSocketChannel(ServerSocketChannel socketChannel, ChannelFactory< } /** - * This method sets the accept context for a server socket channel. The accept context is called when a - * new channel is accepted. The parameter passed to the context is the new channel. + * This method sets the context for a server socket channel. 
The context is called when a new channel is
+ * accepted, an exception occurs, or it is time to close the channel.
 *
- * @param acceptContext to call
+ * @param context to call
 */
- public void setAcceptContext(Consumer<NioSocketChannel> acceptContext) {
- this.acceptContext = acceptContext;
+ public void setContext(ServerChannelContext context) {
+ if (contextSet.compareAndSet(false, true)) {
+ this.context = context;
+ } else {
+ throw new IllegalStateException("Context on this channel was already set. It should only be set once.");
+ }
 }

- public Consumer<NioSocketChannel> getAcceptContext() {
- return acceptContext;
+ @Override
+ public ServerChannelContext getContext() {
+ return context;
 }

 @Override
diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioSocketChannel.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioSocketChannel.java
index 5260c0f5fcf16..aba98ff0cbff0 100644
--- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioSocketChannel.java
+++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioSocketChannel.java
@@ -22,7 +22,6 @@
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
-import java.nio.channels.ClosedChannelException;
 import java.nio.channels.SocketChannel;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -33,10 +32,8 @@ public class NioSocketChannel extends AbstractNioChannel<SocketChannel> {
 private final InetSocketAddress remoteAddress;
 private final CompletableFuture<Void> connectContext = new CompletableFuture<>();
 private final SocketSelector socketSelector;
- private final AtomicBoolean contextsSet = new AtomicBoolean(false);
- private WriteContext writeContext;
- private ReadContext readContext;
- private BiConsumer<NioSocketChannel, Exception> exceptionContext;
+ private final AtomicBoolean contextSet = new AtomicBoolean(false);
+ private SocketChannelContext context;
 private Exception connectException;

 public NioSocketChannel(SocketChannel socketChannel, SocketSelector selector) throws IOException {
@@ -45,23 +42,15 @@ public NioSocketChannel(SocketChannel socketChannel, SocketSelector selector) th
 this.socketSelector = selector;
 }

- @Override
- public void closeFromSelector() throws IOException {
- assert socketSelector.isOnCurrentThread() : "Should only call from selector thread";
- // Even if the channel has already been closed we will clear any pending write operations just in case
- if (writeContext.hasQueuedWriteOps()) {
- writeContext.clearQueuedWriteOps(new ClosedChannelException());
- }
- readContext.close();
-
- super.closeFromSelector();
- }
-
 @Override
 public SocketSelector getSelector() {
 return socketSelector;
 }

+ public int write(ByteBuffer buffer) throws IOException {
+ return socketChannel.write(buffer);
+ }
+
 public int write(ByteBuffer[] buffers) throws IOException {
 if (buffers.length == 1) {
 return socketChannel.write(buffers[0]);
@@ -82,37 +71,17 @@ public int read(ByteBuffer[] buffers) throws IOException {
 }
 }

- public int read(InboundChannelBuffer buffer) throws IOException {
- int bytesRead = (int) socketChannel.read(buffer.sliceBuffersFrom(buffer.getIndex()));
-
- if (bytesRead == -1) {
- return bytesRead;
- }
-
- buffer.incrementIndex(bytesRead);
- return bytesRead;
- }
-
- public void setContexts(ReadContext readContext, WriteContext writeContext, BiConsumer<NioSocketChannel, Exception> exceptionContext) {
- if (contextsSet.compareAndSet(false, true)) {
- this.readContext = readContext;
- this.writeContext = writeContext;
- this.exceptionContext = exceptionContext;
+ public void
setContext(SocketChannelContext context) {
+ if (contextSet.compareAndSet(false, true)) {
+ this.context = context;
 } else {
- throw new IllegalStateException("Contexts on this channel were already set. They should only be once.");
+ throw new IllegalStateException("Context on this channel was already set. It should only be set once.");
 }
 }

- public WriteContext getWriteContext() {
- return writeContext;
- }
-
- public ReadContext getReadContext() {
- return readContext;
- }
-
- public BiConsumer<NioSocketChannel, Exception> getExceptionContext() {
- return exceptionContext;
+ @Override
+ public SocketChannelContext getContext() {
+ return context;
 }

 public InetSocketAddress getRemoteAddress() {
@@ -123,14 +92,6 @@ public boolean isConnectComplete() {
 return isConnectComplete0();
 }

- public boolean isWritable() {
- return isClosing.get() == false;
- }
-
- public boolean isReadable() {
- return isClosing.get() == false;
- }
-
 /**
 * This method will attempt to complete the connection process for this channel. It should be called for
 * new channels or for a channel that has produced an OP_CONNECT event. If this method returns true then
diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SelectionKeyUtils.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SelectionKeyUtils.java
index b6272ce713501..be2dc6f3414bc 100644
--- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SelectionKeyUtils.java
+++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SelectionKeyUtils.java
@@ -26,28 +26,81 @@ public final class SelectionKeyUtils {

 private SelectionKeyUtils() {}

+ /**
+ * Adds an interest in writes for this channel while maintaining other interests.
+ *
+ * @param channel the channel
+ * @throws CancelledKeyException if the key was already cancelled
+ */
 public static void setWriteInterested(NioChannel channel) throws CancelledKeyException {
 SelectionKey selectionKey = channel.getSelectionKey();
 selectionKey.interestOps(selectionKey.interestOps() | SelectionKey.OP_WRITE);
 }

+ /**
+ * Removes an interest in writes for this channel while maintaining other interests.
+ *
+ * @param channel the channel
+ * @throws CancelledKeyException if the key was already cancelled
+ */
 public static void removeWriteInterested(NioChannel channel) throws CancelledKeyException {
 SelectionKey selectionKey = channel.getSelectionKey();
 selectionKey.interestOps(selectionKey.interestOps() & ~SelectionKey.OP_WRITE);
 }

+ /**
+ * Adds an interest in connects and reads for this channel while maintaining other interests.
+ *
+ * @param channel the channel
+ * @throws CancelledKeyException if the key was already cancelled
+ */
 public static void setConnectAndReadInterested(NioChannel channel) throws CancelledKeyException {
 SelectionKey selectionKey = channel.getSelectionKey();
 selectionKey.interestOps(selectionKey.interestOps() | SelectionKey.OP_CONNECT | SelectionKey.OP_READ);
 }

+ /**
+ * Adds an interest in connects, reads, and writes for this channel while maintaining other interests.
+ *
+ * @param channel the channel
+ * @throws CancelledKeyException if the key was already cancelled
+ */
+ public static void setConnectReadAndWriteInterested(NioChannel channel) throws CancelledKeyException {
+ SelectionKey selectionKey = channel.getSelectionKey();
+ selectionKey.interestOps(selectionKey.interestOps() | SelectionKey.OP_CONNECT | SelectionKey.OP_READ | SelectionKey.OP_WRITE);
+ }
+
+ /**
+ * Removes an interest in connects for this channel while maintaining other interests.
+ * + * @param channel the channel + * @throws CancelledKeyException if the key was already cancelled + */ public static void removeConnectInterested(NioChannel channel) throws CancelledKeyException { SelectionKey selectionKey = channel.getSelectionKey(); selectionKey.interestOps(selectionKey.interestOps() & ~SelectionKey.OP_CONNECT); } - public static void setAcceptInterested(NioServerSocketChannel channel) { + /** + * Adds an interest in accepts for this channel while maintaining other interests. + * + * @param channel the channel + * @throws CancelledKeyException if the key was already cancelled + */ + public static void setAcceptInterested(NioServerSocketChannel channel) throws CancelledKeyException { SelectionKey selectionKey = channel.getSelectionKey(); selectionKey.interestOps(selectionKey.interestOps() | SelectionKey.OP_ACCEPT); } + + + /** + * Checks for an interest in writes for this channel. + * + * @param channel the channel + * @return a boolean indicating if we are currently interested in writes for this channel + * @throws CancelledKeyException if the key was already cancelled + */ + public static boolean isWriteInterested(NioSocketChannel channel) throws CancelledKeyException { + return (channel.getSelectionKey().interestOps() & SelectionKey.OP_WRITE) != 0; + } } diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ServerChannelContext.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ServerChannelContext.java new file mode 100644 index 0000000000000..551cab48e0577 --- /dev/null +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ServerChannelContext.java @@ -0,0 +1,62 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
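All of the SelectionKeyUtils helpers above reduce to the same JDK interest-op bit arithmetic. A condensed restatement of that arithmetic (the class and method names in this sketch are invented):

```java
import java.nio.channels.CancelledKeyException;
import java.nio.channels.SelectionKey;

final class InterestOps {
    static void toggleWriteInterest(NioSocketChannel channel, boolean interested) throws CancelledKeyException {
        SelectionKey key = channel.getSelectionKey();
        int ops = key.interestOps();                        // may throw CancelledKeyException
        key.interestOps(interested
            ? ops | SelectionKey.OP_WRITE                   // set the OP_WRITE bit
            : ops & ~SelectionKey.OP_WRITE);                // clear the OP_WRITE bit
    }
}
```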
+ */
+
+package org.elasticsearch.nio;
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.function.BiConsumer;
+import java.util.function.Consumer;
+
+public class ServerChannelContext implements ChannelContext {
+
+ private final NioServerSocketChannel channel;
+ private final Consumer<NioSocketChannel> acceptor;
+ private final BiConsumer<NioServerSocketChannel, Exception> exceptionHandler;
+ private final AtomicBoolean isClosing = new AtomicBoolean(false);
+
+ public ServerChannelContext(NioServerSocketChannel channel, Consumer<NioSocketChannel> acceptor,
+ BiConsumer<NioServerSocketChannel, Exception> exceptionHandler) {
+ this.channel = channel;
+ this.acceptor = acceptor;
+ this.exceptionHandler = exceptionHandler;
+ }
+
+ public void acceptChannel(NioSocketChannel acceptedChannel) {
+ acceptor.accept(acceptedChannel);
+ }
+
+ @Override
+ public void closeFromSelector() throws IOException {
+ channel.closeFromSelector();
+ }
+
+ @Override
+ public void closeChannel() {
+ if (isClosing.compareAndSet(false, true)) {
+ channel.getSelector().queueChannelClose(channel);
+ }
+ }
+
+ @Override
+ public void handleException(Exception e) {
+ exceptionHandler.accept(channel, e);
+ }
+}
diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java
new file mode 100644
index 0000000000000..62f82e8995d16
--- /dev/null
+++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.nio;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.function.BiConsumer;
+
+/**
+ * This context should implement the specific logic for a channel. When a channel receives a notification
+ * that it is ready to perform certain operations (read, write, etc.) the {@link SocketChannelContext} will
+ * be called. This context will need to implement all protocol related logic. Additionally, if any special
+ * close behavior is required, it should be implemented in this context.
+ *
+ * The only methods of the context that should ever be called from a non-selector thread are
+ * {@link #closeChannel()} and {@link #sendMessage(ByteBuffer[], BiConsumer)}.
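+ *
+ * For illustration only (not part of this change), a subclass read method built on the byte
+ * helpers below might look like the following, assuming the subclass keeps an
+ * {@code InboundChannelBuffer} in a field named {@code channelBuffer}:
+ * <pre>{@code
+ * public int read() throws IOException {
+ *     int bytesRead = readFromChannel(channelBuffer.sliceBuffersFrom(channelBuffer.getIndex()));
+ *     if (bytesRead != 0) {
+ *         channelBuffer.incrementIndex(bytesRead);
+ *     }
+ *     return bytesRead;
+ * }
+ * }</pre>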
+ */ +public abstract class SocketChannelContext implements ChannelContext { + + protected final NioSocketChannel channel; + private final BiConsumer exceptionHandler; + private boolean ioException; + private boolean peerClosed; + + protected SocketChannelContext(NioSocketChannel channel, BiConsumer exceptionHandler) { + this.channel = channel; + this.exceptionHandler = exceptionHandler; + } + + @Override + public void handleException(Exception e) { + exceptionHandler.accept(channel, e); + } + + public void channelRegistered() throws IOException {} + + public abstract int read() throws IOException; + + public abstract void sendMessage(ByteBuffer[] buffers, BiConsumer listener); + + public abstract void queueWriteOperation(WriteOperation writeOperation); + + public abstract void flushChannel() throws IOException; + + public abstract boolean hasQueuedWriteOps(); + + /** + * This method indicates if a selector should close this channel. + * + * @return a boolean indicating if the selector should close + */ + public abstract boolean selectorShouldClose(); + + protected boolean hasIOException() { + return ioException; + } + + protected boolean isPeerClosed() { + return peerClosed; + } + + protected int readFromChannel(ByteBuffer buffer) throws IOException { + try { + int bytesRead = channel.read(buffer); + if (bytesRead < 0) { + peerClosed = true; + bytesRead = 0; + } + return bytesRead; + } catch (IOException e) { + ioException = true; + throw e; + } + } + + protected int readFromChannel(ByteBuffer[] buffers) throws IOException { + try { + int bytesRead = channel.read(buffers); + if (bytesRead < 0) { + peerClosed = true; + bytesRead = 0; + } + return bytesRead; + } catch (IOException e) { + ioException = true; + throw e; + } + } + + protected int flushToChannel(ByteBuffer buffer) throws IOException { + try { + return channel.write(buffer); + } catch (IOException e) { + ioException = true; + throw e; + } + } + + protected int flushToChannel(ByteBuffer[] buffers) throws IOException { + try { + return channel.write(buffers); + } catch (IOException e) { + ioException = true; + throw e; + } + } + + @FunctionalInterface + public interface ReadConsumer { + int consumeReads(InboundChannelBuffer channelBuffer) throws IOException; + } +} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketEventHandler.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketEventHandler.java index d3be18f377638..b1192f11eb120 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketEventHandler.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketEventHandler.java @@ -43,8 +43,14 @@ public SocketEventHandler(Logger logger) { * * @param channel that was registered */ - protected void handleRegistration(NioSocketChannel channel) { - SelectionKeyUtils.setConnectAndReadInterested(channel); + protected void handleRegistration(NioSocketChannel channel) throws IOException { + SocketChannelContext context = channel.getContext(); + context.channelRegistered(); + if (context.hasQueuedWriteOps()) { + SelectionKeyUtils.setConnectReadAndWriteInterested(channel); + } else { + SelectionKeyUtils.setConnectAndReadInterested(channel); + } } /** @@ -55,7 +61,7 @@ protected void handleRegistration(NioSocketChannel channel) { */ protected void registrationException(NioSocketChannel channel, Exception exception) { logger.debug(() -> new ParameterizedMessage("failed to register socket channel: {}", channel), exception); - exceptionCaught(channel, exception); 
+ channel.getContext().handleException(exception); } /** @@ -76,7 +82,7 @@ protected void handleConnect(NioSocketChannel channel) { */ protected void connectException(NioSocketChannel channel, Exception exception) { logger.debug(() -> new ParameterizedMessage("failed to connect to socket channel: {}", channel), exception); - exceptionCaught(channel, exception); + channel.getContext().handleException(exception); } /** @@ -86,10 +92,7 @@ protected void connectException(NioSocketChannel channel, Exception exception) { * @param channel that can be read */ protected void handleRead(NioSocketChannel channel) throws IOException { - int bytesRead = channel.getReadContext().read(); - if (bytesRead == -1) { - handleClose(channel); - } + channel.getContext().read(); } /** @@ -100,23 +103,18 @@ protected void handleRead(NioSocketChannel channel) throws IOException { */ protected void readException(NioSocketChannel channel, Exception exception) { logger.debug(() -> new ParameterizedMessage("exception while reading from socket channel: {}", channel), exception); - exceptionCaught(channel, exception); + channel.getContext().handleException(exception); } /** * This method is called when a channel signals it is ready to receive writes. All of the write logic * should occur in this call. * - * @param channel that can be read + * @param channel that can be written to */ protected void handleWrite(NioSocketChannel channel) throws IOException { - WriteContext channelContext = channel.getWriteContext(); + SocketChannelContext channelContext = channel.getContext(); channelContext.flushChannel(); - if (channelContext.hasQueuedWriteOps()) { - SelectionKeyUtils.setWriteInterested(channel); - } else { - SelectionKeyUtils.removeWriteInterested(channel); - } } /** @@ -127,20 +125,7 @@ protected void handleWrite(NioSocketChannel channel) throws IOException { */ protected void writeException(NioSocketChannel channel, Exception exception) { logger.debug(() -> new ParameterizedMessage("exception while writing to socket channel: {}", channel), exception); - exceptionCaught(channel, exception); - } - - /** - * This method is called when handling an event from a channel fails due to an unexpected exception. - * An example would be if checking ready ops on a {@link java.nio.channels.SelectionKey} threw - * {@link java.nio.channels.CancelledKeyException}. 
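The handleRead change above works because SocketChannelContext#readFromChannel converts an end-of-stream read into a peer-closed flag instead of returning -1; the selector then closes the channel via selectorShouldClose() during post-handling. A standalone restatement of that conversion, with the AtomicBoolean standing in for the context's internal flag:

```java
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.SocketChannel;
import java.util.concurrent.atomic.AtomicBoolean;

final class EofHandling {
    static int readOrFlagEof(SocketChannel rawChannel, ByteBuffer dst, AtomicBoolean peerClosed) throws IOException {
        int bytesRead = rawChannel.read(dst); // -1 signals the peer closed the connection
        if (bytesRead < 0) {
            peerClosed.set(true);             // selectorShouldClose() will now report true
            return 0;                         // EOF is reported to callers as zero bytes
        }
        return bytesRead;
    }
}
```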
- * - * @param channel that caused the exception - * @param exception that was thrown - */ - protected void genericChannelException(NioChannel channel, Exception exception) { - super.genericChannelException(channel, exception); - exceptionCaught((NioSocketChannel) channel, exception); + channel.getContext().handleException(exception); } /** @@ -153,7 +138,20 @@ protected void listenerException(BiConsumer listener, Exceptio logger.warn(new ParameterizedMessage("exception while executing listener: {}", listener), exception); } - private void exceptionCaught(NioSocketChannel channel, Exception e) { - channel.getExceptionContext().accept(channel, e); + /** + * @param channel that was handled + */ + protected void postHandling(NioSocketChannel channel) { + if (channel.getContext().selectorShouldClose()) { + handleClose(channel); + } else { + boolean currentlyWriteInterested = SelectionKeyUtils.isWriteInterested(channel); + boolean pendingWrites = channel.getContext().hasQueuedWriteOps(); + if (currentlyWriteInterested == false && pendingWrites) { + SelectionKeyUtils.setWriteInterested(channel); + } else if (currentlyWriteInterested && pendingWrites == false) { + SelectionKeyUtils.removeWriteInterested(channel); + } + } } } diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketSelector.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketSelector.java index ac8ad87b726a2..2de48fb8899e2 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketSelector.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketSelector.java @@ -64,6 +64,8 @@ void processKey(SelectionKey selectionKey) { handleRead(nioSocketChannel); } } + + eventHandler.postHandling(nioSocketChannel); } @Override @@ -118,12 +120,12 @@ public void queueWrite(WriteOperation writeOperation) { * @param writeOperation to be queued in a channel's buffer */ public void queueWriteInChannelBuffer(WriteOperation writeOperation) { - assert isOnCurrentThread() : "Must be on selector thread"; + assertOnSelectorThread(); NioSocketChannel channel = writeOperation.getChannel(); - WriteContext context = channel.getWriteContext(); + SocketChannelContext context = channel.getContext(); try { SelectionKeyUtils.setWriteInterested(channel); - context.queueWriteOperations(writeOperation); + context.queueWriteOperation(writeOperation); } catch (Exception e) { executeFailedListener(writeOperation.getListener(), e); } @@ -137,7 +139,7 @@ public void queueWriteInChannelBuffer(WriteOperation writeOperation) { * @param value to provide to listener */ public void executeListener(BiConsumer listener, V value) { - assert isOnCurrentThread() : "Must be on selector thread"; + assertOnSelectorThread(); try { listener.accept(value, null); } catch (Exception e) { @@ -153,7 +155,7 @@ public void executeListener(BiConsumer listener, V value) { * @param exception to provide to listener */ public void executeFailedListener(BiConsumer listener, Exception exception) { - assert isOnCurrentThread() : "Must be on selector thread"; + assertOnSelectorThread(); try { listener.accept(null, exception); } catch (Exception e) { @@ -180,7 +182,7 @@ private void handleRead(NioSocketChannel nioSocketChannel) { private void handleQueuedWrites() { WriteOperation writeOperation; while ((writeOperation = queuedWrites.poll()) != null) { - if (writeOperation.getChannel().isWritable()) { + if (writeOperation.getChannel().isOpen()) { queueWriteInChannelBuffer(writeOperation); } else { 
executeFailedListener(writeOperation.getListener(), new ClosedChannelException());
diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteOperation.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteOperation.java
index b6fcc838a964f..d2dfe4f37a007 100644
--- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteOperation.java
+++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteOperation.java
@@ -19,74 +19,16 @@

 package org.elasticsearch.nio;

-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
 import java.util.function.BiConsumer;

-public class WriteOperation {
-
- private final NioSocketChannel channel;
- private final BiConsumer<Void, Exception> listener;
- private final ByteBuffer[] buffers;
- private final int[] offsets;
- private final int length;
- private int internalIndex;
-
- public WriteOperation(NioSocketChannel channel, ByteBuffer[] buffers, BiConsumer<Void, Exception> listener) {
- this.channel = channel;
- this.listener = listener;
- this.buffers = buffers;
- this.offsets = new int[buffers.length];
- int offset = 0;
- for (int i = 0; i < buffers.length; i++) {
- ByteBuffer buffer = buffers[i];
- offsets[i] = offset;
- offset += buffer.remaining();
- }
- length = offset;
- }
-
- public ByteBuffer[] getByteBuffers() {
- return buffers;
- }
-
- public BiConsumer<Void, Exception> getListener() {
- return listener;
- }
-
- public NioSocketChannel getChannel() {
- return channel;
- }
-
- public boolean isFullyFlushed() {
- return internalIndex == length;
- }
-
- public int flush() throws IOException {
- int written = channel.write(getBuffersToWrite());
- internalIndex += written;
- return written;
- }
-
- private ByteBuffer[] getBuffersToWrite() {
- int offsetIndex = getOffsetIndex(internalIndex);
-
- ByteBuffer[] postIndexBuffers = new ByteBuffer[buffers.length - offsetIndex];
-
- ByteBuffer firstBuffer = buffers[offsetIndex].duplicate();
- firstBuffer.position(internalIndex - offsets[offsetIndex]);
- postIndexBuffers[0] = firstBuffer;
- int j = 1;
- for (int i = (offsetIndex + 1); i < buffers.length; ++i) {
- postIndexBuffers[j++] = buffers[i].duplicate();
- }
+/**
+ * This is a basic write operation that can be queued with a channel. The only requirement of a write
+ * operation is that it has a listener and a reference to its channel. The actual conversion of the write
+ * operation implementation to bytes will be performed by the {@link SocketChannelContext}.
+ */
+public interface WriteOperation {

- return postIndexBuffers;
- }
+ BiConsumer<Void, Exception> getListener();

- private int getOffsetIndex(int offset) {
- final int i = Arrays.binarySearch(offsets, offset);
- return i < 0 ?
(-(i + 1)) - 1 : i; - } + NioSocketChannel getChannel(); } diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/AcceptorEventHandlerTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/AcceptorEventHandlerTests.java index 9d8f47fe3ef4d..23ab3bb3e1d62 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/AcceptorEventHandlerTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/AcceptorEventHandlerTests.java @@ -27,8 +27,6 @@ import java.nio.channels.ServerSocketChannel; import java.nio.channels.SocketChannel; import java.util.ArrayList; -import java.util.function.BiConsumer; -import java.util.function.Consumer; import static org.mockito.Matchers.same; import static org.mockito.Mockito.mock; @@ -41,21 +39,21 @@ public class AcceptorEventHandlerTests extends ESTestCase { private SocketSelector socketSelector; private ChannelFactory channelFactory; private NioServerSocketChannel channel; - private Consumer acceptedChannelCallback; + private ServerChannelContext context; @Before @SuppressWarnings("unchecked") public void setUpHandler() throws IOException { channelFactory = mock(ChannelFactory.class); socketSelector = mock(SocketSelector.class); - acceptedChannelCallback = mock(Consumer.class); + context = mock(ServerChannelContext.class); ArrayList selectors = new ArrayList<>(); selectors.add(socketSelector); handler = new AcceptorEventHandler(logger, new RoundRobinSupplier<>(selectors.toArray(new SocketSelector[selectors.size()]))); AcceptingSelector selector = mock(AcceptingSelector.class); channel = new DoNotRegisterServerChannel(mock(ServerSocketChannel.class), channelFactory, selector); - channel.setAcceptContext(acceptedChannelCallback); + channel.setContext(context); channel.register(); } @@ -80,11 +78,11 @@ public void testHandleAcceptCallsChannelFactory() throws IOException { @SuppressWarnings("unchecked") public void testHandleAcceptCallsServerAcceptCallback() throws IOException { NioSocketChannel childChannel = new NioSocketChannel(mock(SocketChannel.class), socketSelector); - childChannel.setContexts(mock(ReadContext.class), mock(WriteContext.class), mock(BiConsumer.class)); + childChannel.setContext(mock(SocketChannelContext.class)); when(channelFactory.acceptNioChannel(same(channel), same(socketSelector))).thenReturn(childChannel); handler.acceptChannel(channel); - verify(acceptedChannelCallback).accept(childChannel); + verify(context).acceptChannel(childChannel); } } diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesChannelContextTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesChannelContextTests.java new file mode 100644 index 0000000000000..68ae1f2e50304 --- /dev/null +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesChannelContextTests.java @@ -0,0 +1,337 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
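The tests that follow exercise BytesChannelContext through a mocked ReadConsumer. For a sense of what a real consumer might look like, here is a hypothetical 4-byte length-prefixed framing consumer; it assumes InboundChannelBuffer#sliceBuffersTo(long) from this library, assumes the header does not straddle a page boundary, and handleFrame is an invented callback:

```java
// Returning 0 tells BytesChannelContext#read to stop its consume loop and wait
// for more bytes; a positive return value is released from the front of the buffer.
SocketChannelContext.ReadConsumer frameConsumer = channelBuffer -> {
    if (channelBuffer.getIndex() < Integer.BYTES) {
        return 0;                                                  // length header incomplete
    }
    int frameLength = channelBuffer.sliceBuffersTo(Integer.BYTES)[0].getInt();
    if (channelBuffer.getIndex() < Integer.BYTES + frameLength) {
        return 0;                                                  // frame body incomplete
    }
    handleFrame(channelBuffer, frameLength);                       // hypothetical frame callback
    return Integer.BYTES + frameLength;                            // bytes consumed
};
```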
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.nio; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; +import org.mockito.ArgumentCaptor; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; +import java.util.function.BiConsumer; +import java.util.function.Supplier; + +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.isNull; +import static org.mockito.Matchers.same; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class BytesChannelContextTests extends ESTestCase { + + private SocketChannelContext.ReadConsumer readConsumer; + private NioSocketChannel channel; + private BytesChannelContext context; + private InboundChannelBuffer channelBuffer; + private SocketSelector selector; + private BiConsumer listener; + private int messageLength; + + @Before + @SuppressWarnings("unchecked") + public void init() { + readConsumer = mock(SocketChannelContext.ReadConsumer.class); + + messageLength = randomInt(96) + 20; + selector = mock(SocketSelector.class); + listener = mock(BiConsumer.class); + channel = mock(NioSocketChannel.class); + channelBuffer = InboundChannelBuffer.allocatingInstance(); + context = new BytesChannelContext(channel, null, readConsumer, channelBuffer); + + when(channel.getSelector()).thenReturn(selector); + when(selector.isOnCurrentThread()).thenReturn(true); + } + + public void testSuccessfulRead() throws IOException { + byte[] bytes = createMessage(messageLength); + + when(channel.read(any(ByteBuffer[].class))).thenAnswer(invocationOnMock -> { + ByteBuffer[] buffers = (ByteBuffer[]) invocationOnMock.getArguments()[0]; + buffers[0].put(bytes); + return bytes.length; + }); + + when(readConsumer.consumeReads(channelBuffer)).thenReturn(messageLength, 0); + + assertEquals(messageLength, context.read()); + + assertEquals(0, channelBuffer.getIndex()); + assertEquals(BigArrays.BYTE_PAGE_SIZE - bytes.length, channelBuffer.getCapacity()); + verify(readConsumer, times(1)).consumeReads(channelBuffer); + } + + public void testMultipleReadsConsumed() throws IOException { + byte[] bytes = createMessage(messageLength * 2); + + when(channel.read(any(ByteBuffer[].class))).thenAnswer(invocationOnMock -> { + ByteBuffer[] buffers = (ByteBuffer[]) invocationOnMock.getArguments()[0]; + buffers[0].put(bytes); + return bytes.length; + }); + + when(readConsumer.consumeReads(channelBuffer)).thenReturn(messageLength, messageLength, 0); + + assertEquals(bytes.length, context.read()); + + assertEquals(0, channelBuffer.getIndex()); + assertEquals(BigArrays.BYTE_PAGE_SIZE - bytes.length, channelBuffer.getCapacity()); + verify(readConsumer, times(2)).consumeReads(channelBuffer); + } + + public void testPartialRead() throws IOException { + byte[] bytes = createMessage(messageLength); + + when(channel.read(any(ByteBuffer[].class))).thenAnswer(invocationOnMock -> { + ByteBuffer[] buffers = (ByteBuffer[]) 
invocationOnMock.getArguments()[0]; + buffers[0].put(bytes); + return bytes.length; + }); + + + when(readConsumer.consumeReads(channelBuffer)).thenReturn(0); + + assertEquals(messageLength, context.read()); + + assertEquals(bytes.length, channelBuffer.getIndex()); + verify(readConsumer, times(1)).consumeReads(channelBuffer); + + when(readConsumer.consumeReads(channelBuffer)).thenReturn(messageLength * 2, 0); + + assertEquals(messageLength, context.read()); + + assertEquals(0, channelBuffer.getIndex()); + assertEquals(BigArrays.BYTE_PAGE_SIZE - (bytes.length * 2), channelBuffer.getCapacity()); + verify(readConsumer, times(2)).consumeReads(channelBuffer); + } + + public void testReadThrowsIOException() throws IOException { + IOException ioException = new IOException(); + when(channel.read(any(ByteBuffer[].class))).thenThrow(ioException); + + IOException ex = expectThrows(IOException.class, () -> context.read()); + assertSame(ioException, ex); + } + + public void testReadThrowsIOExceptionMeansReadyForClose() throws IOException { + when(channel.read(any(ByteBuffer[].class))).thenThrow(new IOException()); + + assertFalse(context.selectorShouldClose()); + expectThrows(IOException.class, () -> context.read()); + assertTrue(context.selectorShouldClose()); + } + + public void testReadLessThanZeroMeansReadyForClose() throws IOException { + when(channel.read(any(ByteBuffer[].class))).thenReturn(-1); + + assertEquals(0, context.read()); + + assertTrue(context.selectorShouldClose()); + } + + public void testCloseClosesChannelBuffer() throws IOException { + when(channel.isOpen()).thenReturn(true); + Runnable closer = mock(Runnable.class); + Supplier pageSupplier = () -> new InboundChannelBuffer.Page(ByteBuffer.allocate(1 << 14), closer); + InboundChannelBuffer buffer = new InboundChannelBuffer(pageSupplier); + buffer.ensureCapacity(1); + BytesChannelContext context = new BytesChannelContext(channel, null, readConsumer, buffer); + context.closeFromSelector(); + verify(closer).run(); + } + + public void testWriteFailsIfClosing() { + context.closeChannel(); + + ByteBuffer[] buffers = {ByteBuffer.wrap(createMessage(10))}; + context.sendMessage(buffers, listener); + + verify(listener).accept(isNull(Void.class), any(ClosedChannelException.class)); + } + + public void testSendMessageFromDifferentThreadIsQueuedWithSelector() throws Exception { + ArgumentCaptor writeOpCaptor = ArgumentCaptor.forClass(BytesWriteOperation.class); + + when(selector.isOnCurrentThread()).thenReturn(false); + + ByteBuffer[] buffers = {ByteBuffer.wrap(createMessage(10))}; + context.sendMessage(buffers, listener); + + verify(selector).queueWrite(writeOpCaptor.capture()); + BytesWriteOperation writeOp = writeOpCaptor.getValue(); + + assertSame(listener, writeOp.getListener()); + assertSame(channel, writeOp.getChannel()); + assertEquals(buffers[0], writeOp.getBuffersToWrite()[0]); + } + + public void testSendMessageFromSameThreadIsQueuedInChannel() { + ArgumentCaptor writeOpCaptor = ArgumentCaptor.forClass(BytesWriteOperation.class); + + ByteBuffer[] buffers = {ByteBuffer.wrap(createMessage(10))}; + context.sendMessage(buffers, listener); + + verify(selector).queueWriteInChannelBuffer(writeOpCaptor.capture()); + BytesWriteOperation writeOp = writeOpCaptor.getValue(); + + assertSame(listener, writeOp.getListener()); + assertSame(channel, writeOp.getChannel()); + assertEquals(buffers[0], writeOp.getBuffersToWrite()[0]); + } + + public void testWriteIsQueuedInChannel() { + assertFalse(context.hasQueuedWriteOps()); + + ByteBuffer[] buffer = 
{ByteBuffer.allocate(10)}; + context.queueWriteOperation(new BytesWriteOperation(channel, buffer, listener)); + + assertTrue(context.hasQueuedWriteOps()); + } + + public void testWriteOpsClearedOnClose() throws Exception { + assertFalse(context.hasQueuedWriteOps()); + + ByteBuffer[] buffer = {ByteBuffer.allocate(10)}; + context.queueWriteOperation(new BytesWriteOperation(channel, buffer, listener)); + + assertTrue(context.hasQueuedWriteOps()); + + when(channel.isOpen()).thenReturn(true); + context.closeFromSelector(); + + verify(selector).executeFailedListener(same(listener), any(ClosedChannelException.class)); + + assertFalse(context.hasQueuedWriteOps()); + } + + public void testQueuedWriteIsFlushedInFlushCall() throws Exception { + assertFalse(context.hasQueuedWriteOps()); + + ByteBuffer[] buffers = {ByteBuffer.allocate(10)}; + BytesWriteOperation writeOperation = mock(BytesWriteOperation.class); + context.queueWriteOperation(writeOperation); + + assertTrue(context.hasQueuedWriteOps()); + + when(writeOperation.getBuffersToWrite()).thenReturn(buffers); + when(writeOperation.isFullyFlushed()).thenReturn(true); + when(writeOperation.getListener()).thenReturn(listener); + context.flushChannel(); + + verify(channel).write(buffers); + verify(selector).executeListener(listener, null); + assertFalse(context.hasQueuedWriteOps()); + } + + public void testPartialFlush() throws IOException { + assertFalse(context.hasQueuedWriteOps()); + + BytesWriteOperation writeOperation = mock(BytesWriteOperation.class); + context.queueWriteOperation(writeOperation); + + assertTrue(context.hasQueuedWriteOps()); + + when(writeOperation.isFullyFlushed()).thenReturn(false); + context.flushChannel(); + + verify(listener, times(0)).accept(null, null); + assertTrue(context.hasQueuedWriteOps()); + } + + @SuppressWarnings("unchecked") + public void testMultipleWritesPartialFlushes() throws IOException { + assertFalse(context.hasQueuedWriteOps()); + + BiConsumer listener2 = mock(BiConsumer.class); + BytesWriteOperation writeOperation1 = mock(BytesWriteOperation.class); + BytesWriteOperation writeOperation2 = mock(BytesWriteOperation.class); + when(writeOperation1.getListener()).thenReturn(listener); + when(writeOperation2.getListener()).thenReturn(listener2); + context.queueWriteOperation(writeOperation1); + context.queueWriteOperation(writeOperation2); + + assertTrue(context.hasQueuedWriteOps()); + + when(writeOperation1.isFullyFlushed()).thenReturn(true); + when(writeOperation2.isFullyFlushed()).thenReturn(false); + context.flushChannel(); + + verify(selector).executeListener(listener, null); + verify(listener2, times(0)).accept(null, null); + assertTrue(context.hasQueuedWriteOps()); + + when(writeOperation2.isFullyFlushed()).thenReturn(true); + + context.flushChannel(); + + verify(selector).executeListener(listener2, null); + assertFalse(context.hasQueuedWriteOps()); + } + + public void testWhenIOExceptionThrownListenerIsCalled() throws IOException { + assertFalse(context.hasQueuedWriteOps()); + + ByteBuffer[] buffers = {ByteBuffer.allocate(10)}; + BytesWriteOperation writeOperation = mock(BytesWriteOperation.class); + context.queueWriteOperation(writeOperation); + + assertTrue(context.hasQueuedWriteOps()); + + IOException exception = new IOException(); + when(writeOperation.getBuffersToWrite()).thenReturn(buffers); + when(channel.write(buffers)).thenThrow(exception); + when(writeOperation.getListener()).thenReturn(listener); + expectThrows(IOException.class, () -> context.flushChannel()); + + 
verify(selector).executeFailedListener(listener, exception);
+ assertFalse(context.hasQueuedWriteOps());
+ }
+
+ public void testWriteIOExceptionMeansChannelReadyToClose() throws IOException {
+ ByteBuffer[] buffers = {ByteBuffer.allocate(10)};
+ BytesWriteOperation writeOperation = mock(BytesWriteOperation.class);
+ context.queueWriteOperation(writeOperation);
+
+ IOException exception = new IOException();
+ when(writeOperation.getBuffersToWrite()).thenReturn(buffers);
+ when(channel.write(buffers)).thenThrow(exception);
+
+ assertFalse(context.selectorShouldClose());
+ expectThrows(IOException.class, () -> context.flushChannel());
+ assertTrue(context.selectorShouldClose());
+ }
+
+ public void testInitiateCloseSchedulesCloseWithSelector() {
+ context.closeChannel();
+ verify(selector).queueChannelClose(channel);
+ }
+
+ private static byte[] createMessage(int length) {
+ byte[] bytes = new byte[length];
+ for (int i = 0; i < length; ++i) {
+ bytes[i] = randomByte();
+ }
+ return bytes;
+ }
+}
diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesReadContextTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesReadContextTests.java
deleted file mode 100644
index 69f187378aca5..0000000000000
--- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesReadContextTests.java
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */ - -package org.elasticsearch.nio; - -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.test.ESTestCase; -import org.junit.Before; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.function.Supplier; - -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -public class BytesReadContextTests extends ESTestCase { - - private ReadContext.ReadConsumer readConsumer; - private NioSocketChannel channel; - private BytesReadContext readContext; - private InboundChannelBuffer channelBuffer; - private int messageLength; - - @Before - public void init() { - readConsumer = mock(ReadContext.ReadConsumer.class); - - messageLength = randomInt(96) + 20; - channel = mock(NioSocketChannel.class); - Supplier pageSupplier = () -> - new InboundChannelBuffer.Page(ByteBuffer.allocate(BigArrays.BYTE_PAGE_SIZE), () -> {}); - channelBuffer = new InboundChannelBuffer(pageSupplier); - readContext = new BytesReadContext(channel, readConsumer, channelBuffer); - } - - public void testSuccessfulRead() throws IOException { - byte[] bytes = createMessage(messageLength); - - when(channel.read(any(ByteBuffer[].class))).thenAnswer(invocationOnMock -> { - ByteBuffer[] buffers = (ByteBuffer[]) invocationOnMock.getArguments()[0]; - buffers[0].put(bytes); - return bytes.length; - }); - - when(readConsumer.consumeReads(channelBuffer)).thenReturn(messageLength, 0); - - assertEquals(messageLength, readContext.read()); - - assertEquals(0, channelBuffer.getIndex()); - assertEquals(BigArrays.BYTE_PAGE_SIZE - bytes.length, channelBuffer.getCapacity()); - verify(readConsumer, times(2)).consumeReads(channelBuffer); - } - - public void testMultipleReadsConsumed() throws IOException { - byte[] bytes = createMessage(messageLength * 2); - - when(channel.read(any(ByteBuffer[].class))).thenAnswer(invocationOnMock -> { - ByteBuffer[] buffers = (ByteBuffer[]) invocationOnMock.getArguments()[0]; - buffers[0].put(bytes); - return bytes.length; - }); - - when(readConsumer.consumeReads(channelBuffer)).thenReturn(messageLength, messageLength, 0); - - assertEquals(bytes.length, readContext.read()); - - assertEquals(0, channelBuffer.getIndex()); - assertEquals(BigArrays.BYTE_PAGE_SIZE - bytes.length, channelBuffer.getCapacity()); - verify(readConsumer, times(3)).consumeReads(channelBuffer); - } - - public void testPartialRead() throws IOException { - byte[] bytes = createMessage(messageLength); - - when(channel.read(any(ByteBuffer[].class))).thenAnswer(invocationOnMock -> { - ByteBuffer[] buffers = (ByteBuffer[]) invocationOnMock.getArguments()[0]; - buffers[0].put(bytes); - return bytes.length; - }); - - - when(readConsumer.consumeReads(channelBuffer)).thenReturn(0, messageLength); - - assertEquals(messageLength, readContext.read()); - - assertEquals(bytes.length, channelBuffer.getIndex()); - verify(readConsumer, times(1)).consumeReads(channelBuffer); - - when(readConsumer.consumeReads(channelBuffer)).thenReturn(messageLength * 2, 0); - - assertEquals(messageLength, readContext.read()); - - assertEquals(0, channelBuffer.getIndex()); - assertEquals(BigArrays.BYTE_PAGE_SIZE - (bytes.length * 2), channelBuffer.getCapacity()); - verify(readConsumer, times(3)).consumeReads(channelBuffer); - } - - public void testReadThrowsIOException() throws IOException { - IOException ioException = new IOException(); - 
when(channel.read(any(ByteBuffer[].class))).thenThrow(ioException); - - IOException ex = expectThrows(IOException.class, () -> readContext.read()); - assertSame(ioException, ex); - } - - public void closeClosesChannelBuffer() { - InboundChannelBuffer buffer = mock(InboundChannelBuffer.class); - BytesReadContext readContext = new BytesReadContext(channel, readConsumer, buffer); - - readContext.close(); - - verify(buffer).close(); - } - - private static byte[] createMessage(int length) { - byte[] bytes = new byte[length]; - for (int i = 0; i < length; ++i) { - bytes[i] = randomByte(); - } - return bytes; - } -} diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesWriteContextTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesWriteContextTests.java deleted file mode 100644 index 9d5b1c92cb6b7..0000000000000 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesWriteContextTests.java +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.nio; - -import org.elasticsearch.test.ESTestCase; -import org.junit.Before; -import org.mockito.ArgumentCaptor; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.channels.ClosedChannelException; -import java.util.function.BiConsumer; - -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.isNull; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -public class BytesWriteContextTests extends ESTestCase { - - private SocketSelector selector; - private BiConsumer listener; - private BytesWriteContext writeContext; - private NioSocketChannel channel; - - @Before - @SuppressWarnings("unchecked") - public void setUp() throws Exception { - super.setUp(); - selector = mock(SocketSelector.class); - listener = mock(BiConsumer.class); - channel = mock(NioSocketChannel.class); - writeContext = new BytesWriteContext(channel); - - when(channel.getSelector()).thenReturn(selector); - when(selector.isOnCurrentThread()).thenReturn(true); - } - - public void testWriteFailsIfChannelNotWritable() throws Exception { - when(channel.isWritable()).thenReturn(false); - - ByteBuffer[] buffers = {ByteBuffer.wrap(generateBytes(10))}; - writeContext.sendMessage(buffers, listener); - - verify(listener).accept(isNull(Void.class), any(ClosedChannelException.class)); - } - - public void testSendMessageFromDifferentThreadIsQueuedWithSelector() throws Exception { - ArgumentCaptor writeOpCaptor = ArgumentCaptor.forClass(WriteOperation.class); - - when(selector.isOnCurrentThread()).thenReturn(false); - when(channel.isWritable()).thenReturn(true); - - ByteBuffer[] buffers = {ByteBuffer.wrap(generateBytes(10))}; - writeContext.sendMessage(buffers, listener); - - verify(selector).queueWrite(writeOpCaptor.capture()); - WriteOperation writeOp = writeOpCaptor.getValue(); - - assertSame(listener, writeOp.getListener()); - assertSame(channel, writeOp.getChannel()); - assertEquals(buffers[0], writeOp.getByteBuffers()[0]); - } - - public void testSendMessageFromSameThreadIsQueuedInChannel() throws Exception { - ArgumentCaptor writeOpCaptor = ArgumentCaptor.forClass(WriteOperation.class); - - when(channel.isWritable()).thenReturn(true); - - ByteBuffer[] buffers = {ByteBuffer.wrap(generateBytes(10))}; - writeContext.sendMessage(buffers, listener); - - verify(selector).queueWriteInChannelBuffer(writeOpCaptor.capture()); - WriteOperation writeOp = writeOpCaptor.getValue(); - - assertSame(listener, writeOp.getListener()); - assertSame(channel, writeOp.getChannel()); - assertEquals(buffers[0], writeOp.getByteBuffers()[0]); - } - - public void testWriteIsQueuedInChannel() throws Exception { - assertFalse(writeContext.hasQueuedWriteOps()); - - ByteBuffer[] buffer = {ByteBuffer.allocate(10)}; - writeContext.queueWriteOperations(new WriteOperation(channel, buffer, listener)); - - assertTrue(writeContext.hasQueuedWriteOps()); - } - - public void testWriteOpsCanBeCleared() throws Exception { - assertFalse(writeContext.hasQueuedWriteOps()); - - ByteBuffer[] buffer = {ByteBuffer.allocate(10)}; - writeContext.queueWriteOperations(new WriteOperation(channel, buffer, listener)); - - assertTrue(writeContext.hasQueuedWriteOps()); - - ClosedChannelException e = new ClosedChannelException(); - writeContext.clearQueuedWriteOps(e); - - verify(selector).executeFailedListener(listener, e); - - assertFalse(writeContext.hasQueuedWriteOps()); - } - - public void 
testQueuedWriteIsFlushedInFlushCall() throws Exception { - assertFalse(writeContext.hasQueuedWriteOps()); - - WriteOperation writeOperation = mock(WriteOperation.class); - writeContext.queueWriteOperations(writeOperation); - - assertTrue(writeContext.hasQueuedWriteOps()); - - when(writeOperation.isFullyFlushed()).thenReturn(true); - when(writeOperation.getListener()).thenReturn(listener); - writeContext.flushChannel(); - - verify(writeOperation).flush(); - verify(selector).executeListener(listener, null); - assertFalse(writeContext.hasQueuedWriteOps()); - } - - public void testPartialFlush() throws IOException { - assertFalse(writeContext.hasQueuedWriteOps()); - - WriteOperation writeOperation = mock(WriteOperation.class); - writeContext.queueWriteOperations(writeOperation); - - assertTrue(writeContext.hasQueuedWriteOps()); - - when(writeOperation.isFullyFlushed()).thenReturn(false); - writeContext.flushChannel(); - - verify(listener, times(0)).accept(null, null); - assertTrue(writeContext.hasQueuedWriteOps()); - } - - @SuppressWarnings("unchecked") - public void testMultipleWritesPartialFlushes() throws IOException { - assertFalse(writeContext.hasQueuedWriteOps()); - - BiConsumer listener2 = mock(BiConsumer.class); - WriteOperation writeOperation1 = mock(WriteOperation.class); - WriteOperation writeOperation2 = mock(WriteOperation.class); - when(writeOperation1.getListener()).thenReturn(listener); - when(writeOperation2.getListener()).thenReturn(listener2); - writeContext.queueWriteOperations(writeOperation1); - writeContext.queueWriteOperations(writeOperation2); - - assertTrue(writeContext.hasQueuedWriteOps()); - - when(writeOperation1.isFullyFlushed()).thenReturn(true); - when(writeOperation2.isFullyFlushed()).thenReturn(false); - writeContext.flushChannel(); - - verify(selector).executeListener(listener, null); - verify(listener2, times(0)).accept(null, null); - assertTrue(writeContext.hasQueuedWriteOps()); - - when(writeOperation2.isFullyFlushed()).thenReturn(true); - - writeContext.flushChannel(); - - verify(selector).executeListener(listener2, null); - assertFalse(writeContext.hasQueuedWriteOps()); - } - - public void testWhenIOExceptionThrownListenerIsCalled() throws IOException { - assertFalse(writeContext.hasQueuedWriteOps()); - - WriteOperation writeOperation = mock(WriteOperation.class); - writeContext.queueWriteOperations(writeOperation); - - assertTrue(writeContext.hasQueuedWriteOps()); - - IOException exception = new IOException(); - when(writeOperation.flush()).thenThrow(exception); - when(writeOperation.getListener()).thenReturn(listener); - expectThrows(IOException.class, () -> writeContext.flushChannel()); - - verify(selector).executeFailedListener(listener, exception); - assertFalse(writeContext.hasQueuedWriteOps()); - } - - private byte[] generateBytes(int n) { - n += 10; - byte[] bytes = new byte[n]; - for (int i = 0; i < n; ++i) { - bytes[i] = randomByte(); - } - return bytes; - } -} diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/ChannelFactoryTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/ChannelFactoryTests.java index c1183af4e5b2e..1c8a8a130ccfa 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/ChannelFactoryTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/ChannelFactoryTests.java @@ -28,7 +28,6 @@ import java.net.InetSocketAddress; import java.nio.channels.ServerSocketChannel; import java.nio.channels.SocketChannel; -import java.util.function.BiConsumer; import 
static org.mockito.Matchers.any; import static org.mockito.Matchers.same; @@ -139,7 +138,7 @@ private static class TestChannelFactory extends ChannelFactory() { - @Override - public void onResponse(Void o) { - isClosed.set(true); - latch.countDown(); - } - - @Override - public void onFailure(Exception e) { - isClosed.set(true); - latch.countDown(); - } - })); - - assertTrue(channel.isOpen()); - assertFalse(closedRawChannel.get()); - assertFalse(isClosed.get()); - - PlainActionFuture closeFuture = PlainActionFuture.newFuture(); - channel.addCloseListener(ActionListener.toBiConsumer(closeFuture)); - channel.close(); - closeFuture.actionGet(); - - - assertTrue(closedRawChannel.get()); - assertFalse(channel.isOpen()); - latch.await(); - assertTrue(isClosed.get()); - } - - private class DoNotCloseServerChannel extends DoNotRegisterServerChannel { - - private DoNotCloseServerChannel(ServerSocketChannel channel, ChannelFactory channelFactory, AcceptingSelector selector) - throws IOException { - super(channel, channelFactory, selector); - } - - @Override - void closeRawChannel() throws IOException { - closedRawChannel.set(true); + try (ServerSocketChannel rawChannel = ServerSocketChannel.open()) { + NioServerSocketChannel channel = new NioServerSocketChannel(rawChannel, mock(ChannelFactory.class), selector); + channel.setContext(new ServerChannelContext(channel, mock(Consumer.class), mock(BiConsumer.class))); + channel.addCloseListener(ActionListener.toBiConsumer(new ActionListener() { + @Override + public void onResponse(Void o) { + isClosed.set(true); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + isClosed.set(true); + latch.countDown(); + } + })); + + assertTrue(channel.isOpen()); + assertTrue(rawChannel.isOpen()); + assertFalse(isClosed.get()); + + PlainActionFuture closeFuture = PlainActionFuture.newFuture(); + channel.addCloseListener(ActionListener.toBiConsumer(closeFuture)); + selector.queueChannelClose(channel); + closeFuture.actionGet(); + + + assertFalse(rawChannel.isOpen()); + assertFalse(channel.isOpen()); + latch.await(); + assertTrue(isClosed.get()); } } } diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/NioSocketChannelTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/NioSocketChannelTests.java index 6a32b11f18b0f..bbda9233bbb80 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/NioSocketChannelTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/NioSocketChannelTests.java @@ -41,7 +41,6 @@ public class NioSocketChannelTests extends ESTestCase { private SocketSelector selector; - private AtomicBoolean closedRawChannel; private Thread thread; @Before @@ -49,7 +48,6 @@ public class NioSocketChannelTests extends ESTestCase { public void startSelector() throws IOException { selector = new SocketSelector(new SocketEventHandler(logger)); thread = new Thread(selector::runLoop); - closedRawChannel = new AtomicBoolean(false); thread.start(); FutureUtils.get(selector.isRunningFuture()); } @@ -65,42 +63,46 @@ public void testClose() throws Exception { AtomicBoolean isClosed = new AtomicBoolean(false); CountDownLatch latch = new CountDownLatch(1); - NioSocketChannel socketChannel = new DoNotCloseChannel(mock(SocketChannel.class), selector); - socketChannel.setContexts(mock(ReadContext.class), mock(WriteContext.class), mock(BiConsumer.class)); - socketChannel.addCloseListener(ActionListener.toBiConsumer(new ActionListener() { - @Override - public void onResponse(Void o) { - 
isClosed.set(true); - latch.countDown(); - } - @Override - public void onFailure(Exception e) { - isClosed.set(true); - latch.countDown(); - } - })); - - assertTrue(socketChannel.isOpen()); - assertFalse(closedRawChannel.get()); - assertFalse(isClosed.get()); - - PlainActionFuture closeFuture = PlainActionFuture.newFuture(); - socketChannel.addCloseListener(ActionListener.toBiConsumer(closeFuture)); - socketChannel.close(); - closeFuture.actionGet(); - - assertTrue(closedRawChannel.get()); - assertFalse(socketChannel.isOpen()); - latch.await(); - assertTrue(isClosed.get()); + try(SocketChannel rawChannel = SocketChannel.open()) { + NioSocketChannel socketChannel = new NioSocketChannel(rawChannel, selector); + socketChannel.setContext(new BytesChannelContext(socketChannel, mock(BiConsumer.class), + mock(SocketChannelContext.ReadConsumer.class), InboundChannelBuffer.allocatingInstance())); + socketChannel.addCloseListener(ActionListener.toBiConsumer(new ActionListener() { + @Override + public void onResponse(Void o) { + isClosed.set(true); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + isClosed.set(true); + latch.countDown(); + } + })); + + assertTrue(socketChannel.isOpen()); + assertTrue(rawChannel.isOpen()); + assertFalse(isClosed.get()); + + PlainActionFuture closeFuture = PlainActionFuture.newFuture(); + socketChannel.addCloseListener(ActionListener.toBiConsumer(closeFuture)); + selector.queueChannelClose(socketChannel); + closeFuture.actionGet(); + + assertFalse(rawChannel.isOpen()); + assertFalse(socketChannel.isOpen()); + latch.await(); + assertTrue(isClosed.get()); + } } @SuppressWarnings("unchecked") public void testConnectSucceeds() throws Exception { SocketChannel rawChannel = mock(SocketChannel.class); when(rawChannel.finishConnect()).thenReturn(true); - NioSocketChannel socketChannel = new DoNotCloseChannel(rawChannel, selector); - socketChannel.setContexts(mock(ReadContext.class), mock(WriteContext.class), mock(BiConsumer.class)); + NioSocketChannel socketChannel = new DoNotRegisterChannel(rawChannel, selector); + socketChannel.setContext(mock(SocketChannelContext.class)); selector.scheduleForRegistration(socketChannel); PlainActionFuture connectFuture = PlainActionFuture.newFuture(); @@ -109,15 +111,14 @@ public void testConnectSucceeds() throws Exception { assertTrue(socketChannel.isConnectComplete()); assertTrue(socketChannel.isOpen()); - assertFalse(closedRawChannel.get()); } @SuppressWarnings("unchecked") public void testConnectFails() throws Exception { SocketChannel rawChannel = mock(SocketChannel.class); when(rawChannel.finishConnect()).thenThrow(new ConnectException()); - NioSocketChannel socketChannel = new DoNotCloseChannel(rawChannel, selector); - socketChannel.setContexts(mock(ReadContext.class), mock(WriteContext.class), mock(BiConsumer.class)); + NioSocketChannel socketChannel = new DoNotRegisterChannel(rawChannel, selector); + socketChannel.setContext(mock(SocketChannelContext.class)); selector.scheduleForRegistration(socketChannel); PlainActionFuture connectFuture = PlainActionFuture.newFuture(); @@ -129,16 +130,4 @@ public void testConnectFails() throws Exception { // Even if connection fails the channel is 'open' until close() is called assertTrue(socketChannel.isOpen()); } - - private class DoNotCloseChannel extends DoNotRegisterChannel { - - private DoNotCloseChannel(SocketChannel channel, SocketSelector selector) throws IOException { - super(channel, selector); - } - - @Override - void closeRawChannel() throws IOException { 
- closedRawChannel.set(true); - } - } } diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketEventHandlerTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketEventHandlerTests.java index 2898cf18d5b9d..d74214636dbdd 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketEventHandlerTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketEventHandlerTests.java @@ -28,8 +28,10 @@ import java.nio.channels.SelectionKey; import java.nio.channels.SocketChannel; import java.util.function.BiConsumer; +import java.util.function.Supplier; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -39,7 +41,6 @@ public class SocketEventHandlerTests extends ESTestCase { private SocketEventHandler handler; private NioSocketChannel channel; - private ReadContext readContext; private SocketChannel rawChannel; @Before @@ -50,21 +51,36 @@ public void setUpHandler() throws IOException { handler = new SocketEventHandler(logger); rawChannel = mock(SocketChannel.class); channel = new DoNotRegisterChannel(rawChannel, socketSelector); - readContext = mock(ReadContext.class); when(rawChannel.finishConnect()).thenReturn(true); - channel.setContexts(readContext, new BytesWriteContext(channel), exceptionHandler); + InboundChannelBuffer buffer = InboundChannelBuffer.allocatingInstance(); + channel.setContext(new BytesChannelContext(channel, exceptionHandler, mock(SocketChannelContext.ReadConsumer.class), buffer)); channel.register(); channel.finishConnect(); when(socketSelector.isOnCurrentThread()).thenReturn(true); } + public void testRegisterCallsContext() throws IOException { + NioSocketChannel channel = mock(NioSocketChannel.class); + SocketChannelContext channelContext = mock(SocketChannelContext.class); + when(channel.getContext()).thenReturn(channelContext); + when(channel.getSelectionKey()).thenReturn(new TestSelectionKey(0)); + handler.handleRegistration(channel); + verify(channelContext).channelRegistered(); + } + public void testRegisterAddsOP_CONNECTAndOP_READInterest() throws IOException { handler.handleRegistration(channel); assertEquals(SelectionKey.OP_READ | SelectionKey.OP_CONNECT, channel.getSelectionKey().interestOps()); } + public void testRegisterWithPendingWritesAddsOP_CONNECTAndOP_READAndOP_WRITEInterest() throws IOException { + channel.getContext().queueWriteOperation(mock(BytesWriteOperation.class)); + handler.handleRegistration(channel); + assertEquals(SelectionKey.OP_READ | SelectionKey.OP_CONNECT | SelectionKey.OP_WRITE, channel.getSelectionKey().interestOps()); + } + public void testRegistrationExceptionCallsExceptionHandler() throws IOException { CancelledKeyException exception = new CancelledKeyException(); handler.registrationException(channel, exception); @@ -83,79 +99,75 @@ public void testConnectExceptionCallsExceptionHandler() throws IOException { verify(exceptionHandler).accept(channel, exception); } - public void testHandleReadDelegatesToReadContext() throws IOException { - when(readContext.read()).thenReturn(1); + public void testHandleReadDelegatesToContext() throws IOException { + NioSocketChannel channel = new DoNotRegisterChannel(rawChannel, mock(SocketSelector.class)); + SocketChannelContext context = mock(SocketChannelContext.class); + channel.setContext(context); + when(context.read()).thenReturn(1); handler.handleRead(channel); - - verify(readContext).read(); + 
verify(context).read(); } - public void testHandleReadMarksChannelForCloseIfPeerClosed() throws IOException { - NioSocketChannel nioSocketChannel = mock(NioSocketChannel.class); - when(nioSocketChannel.getReadContext()).thenReturn(readContext); - when(readContext.read()).thenReturn(-1); - - handler.handleRead(nioSocketChannel); - - verify(nioSocketChannel).closeFromSelector(); - } - - public void testReadExceptionCallsExceptionHandler() throws IOException { + public void testReadExceptionCallsExceptionHandler() { IOException exception = new IOException(); handler.readException(channel, exception); verify(exceptionHandler).accept(channel, exception); } - @SuppressWarnings("unchecked") - public void testHandleWriteWithCompleteFlushRemovesOP_WRITEInterest() throws IOException { - SelectionKey selectionKey = channel.getSelectionKey(); - setWriteAndRead(channel); - assertEquals(SelectionKey.OP_READ | SelectionKey.OP_WRITE, selectionKey.interestOps()); + public void testWriteExceptionCallsExceptionHandler() { + IOException exception = new IOException(); + handler.writeException(channel, exception); + verify(exceptionHandler).accept(channel, exception); + } - ByteBuffer[] buffers = {ByteBuffer.allocate(1)}; - channel.getWriteContext().queueWriteOperations(new WriteOperation(channel, buffers, mock(BiConsumer.class))); + public void testPostHandlingCallWillCloseTheChannelIfReady() throws IOException { + NioSocketChannel channel = mock(NioSocketChannel.class); + SocketChannelContext context = mock(SocketChannelContext.class); + when(channel.getSelectionKey()).thenReturn(new TestSelectionKey(0)); - when(rawChannel.write(buffers[0])).thenReturn(1); - handler.handleWrite(channel); + when(channel.getContext()).thenReturn(context); + when(context.selectorShouldClose()).thenReturn(true); + handler.postHandling(channel); - assertEquals(SelectionKey.OP_READ, selectionKey.interestOps()); + verify(context).closeFromSelector(); } - @SuppressWarnings("unchecked") - public void testHandleWriteWithInCompleteFlushLeavesOP_WRITEInterest() throws IOException { - SelectionKey selectionKey = channel.getSelectionKey(); - setWriteAndRead(channel); - assertEquals(SelectionKey.OP_READ | SelectionKey.OP_WRITE, selectionKey.interestOps()); - - ByteBuffer[] buffers = {ByteBuffer.allocate(1)}; - channel.getWriteContext().queueWriteOperations(new WriteOperation(channel, buffers, mock(BiConsumer.class))); + public void testPostHandlingCallWillNotCloseTheChannelIfNotReady() throws IOException { + NioSocketChannel channel = mock(NioSocketChannel.class); + SocketChannelContext context = mock(SocketChannelContext.class); + when(channel.getSelectionKey()).thenReturn(new TestSelectionKey(0)); - when(rawChannel.write(buffers[0])).thenReturn(0); - handler.handleWrite(channel); + when(channel.getContext()).thenReturn(context); + when(context.selectorShouldClose()).thenReturn(false); + handler.postHandling(channel); - assertEquals(SelectionKey.OP_READ | SelectionKey.OP_WRITE, selectionKey.interestOps()); + verify(channel, times(0)).closeFromSelector(); } - public void testHandleWriteWithNoOpsRemovesOP_WRITEInterest() throws IOException { - SelectionKey selectionKey = channel.getSelectionKey(); - setWriteAndRead(channel); - assertEquals(SelectionKey.OP_READ | SelectionKey.OP_WRITE, channel.getSelectionKey().interestOps()); + public void testPostHandlingWillAddWriteIfNecessary() throws IOException { + NioSocketChannel channel = new DoNotRegisterChannel(rawChannel, mock(SocketSelector.class)); + channel.setSelectionKey(new 
TestSelectionKey(SelectionKey.OP_READ)); + SocketChannelContext context = mock(SocketChannelContext.class); + channel.setContext(context); - handler.handleWrite(channel); + when(context.hasQueuedWriteOps()).thenReturn(true); - assertEquals(SelectionKey.OP_READ, selectionKey.interestOps()); + assertEquals(SelectionKey.OP_READ, channel.getSelectionKey().interestOps()); + handler.postHandling(channel); + assertEquals(SelectionKey.OP_READ | SelectionKey.OP_WRITE, channel.getSelectionKey().interestOps()); } - private void setWriteAndRead(NioChannel channel) { - SelectionKeyUtils.setConnectAndReadInterested(channel); - SelectionKeyUtils.removeConnectInterested(channel); - SelectionKeyUtils.setWriteInterested(channel); - } + public void testPostHandlingWillRemoveWriteIfNecessary() throws IOException { + NioSocketChannel channel = new DoNotRegisterChannel(rawChannel, mock(SocketSelector.class)); + channel.setSelectionKey(new TestSelectionKey(SelectionKey.OP_READ | SelectionKey.OP_WRITE)); + SocketChannelContext context = mock(SocketChannelContext.class); + channel.setContext(context); - public void testWriteExceptionCallsExceptionHandler() throws IOException { - IOException exception = new IOException(); - handler.writeException(channel, exception); - verify(exceptionHandler).accept(channel, exception); + when(context.hasQueuedWriteOps()).thenReturn(false); + + assertEquals(SelectionKey.OP_READ | SelectionKey.OP_WRITE, channel.getSelectionKey().interestOps()); + handler.postHandling(channel); + assertEquals(SelectionKey.OP_READ, channel.getSelectionKey().interestOps()); } } diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketSelectorTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketSelectorTests.java index e50da352623b5..5992244b2f930 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketSelectorTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketSelectorTests.java @@ -49,7 +49,7 @@ public class SocketSelectorTests extends ESTestCase { private SocketEventHandler eventHandler; private NioSocketChannel channel; private TestSelectionKey selectionKey; - private WriteContext writeContext; + private SocketChannelContext channelContext; private BiConsumer listener; private ByteBuffer[] buffers = {ByteBuffer.allocate(1)}; private Selector rawSelector; @@ -60,7 +60,7 @@ public void setUp() throws Exception { super.setUp(); eventHandler = mock(SocketEventHandler.class); channel = mock(NioSocketChannel.class); - writeContext = mock(WriteContext.class); + channelContext = mock(SocketChannelContext.class); listener = mock(BiConsumer.class); selectionKey = new TestSelectionKey(0); selectionKey.attach(channel); @@ -71,7 +71,7 @@ public void setUp() throws Exception { when(channel.isOpen()).thenReturn(true); when(channel.getSelectionKey()).thenReturn(selectionKey); - when(channel.getWriteContext()).thenReturn(writeContext); + when(channel.getContext()).thenReturn(channelContext); when(channel.isConnectComplete()).thenReturn(true); when(channel.getSelector()).thenReturn(socketSelector); } @@ -129,75 +129,71 @@ public void testConnectIncompleteWillNotNotify() throws Exception { public void testQueueWriteWhenNotRunning() throws Exception { socketSelector.close(); - socketSelector.queueWrite(new WriteOperation(channel, buffers, listener)); + socketSelector.queueWrite(new BytesWriteOperation(channel, buffers, listener)); verify(listener).accept(isNull(Void.class), any(ClosedSelectorException.class)); } - public 
void testQueueWriteChannelIsNoLongerWritable() throws Exception { - WriteOperation writeOperation = new WriteOperation(channel, buffers, listener); + public void testQueueWriteChannelIsClosed() throws Exception { + BytesWriteOperation writeOperation = new BytesWriteOperation(channel, buffers, listener); socketSelector.queueWrite(writeOperation); - when(channel.isWritable()).thenReturn(false); + when(channel.isOpen()).thenReturn(false); socketSelector.preSelect(); - verify(writeContext, times(0)).queueWriteOperations(writeOperation); + verify(channelContext, times(0)).queueWriteOperation(writeOperation); verify(listener).accept(isNull(Void.class), any(ClosedChannelException.class)); } public void testQueueWriteSelectionKeyThrowsException() throws Exception { SelectionKey selectionKey = mock(SelectionKey.class); - WriteOperation writeOperation = new WriteOperation(channel, buffers, listener); + BytesWriteOperation writeOperation = new BytesWriteOperation(channel, buffers, listener); CancelledKeyException cancelledKeyException = new CancelledKeyException(); socketSelector.queueWrite(writeOperation); - when(channel.isWritable()).thenReturn(true); when(channel.getSelectionKey()).thenReturn(selectionKey); when(selectionKey.interestOps(anyInt())).thenThrow(cancelledKeyException); socketSelector.preSelect(); - verify(writeContext, times(0)).queueWriteOperations(writeOperation); + verify(channelContext, times(0)).queueWriteOperation(writeOperation); verify(listener).accept(null, cancelledKeyException); } public void testQueueWriteSuccessful() throws Exception { - WriteOperation writeOperation = new WriteOperation(channel, buffers, listener); + BytesWriteOperation writeOperation = new BytesWriteOperation(channel, buffers, listener); socketSelector.queueWrite(writeOperation); assertTrue((selectionKey.interestOps() & SelectionKey.OP_WRITE) == 0); - when(channel.isWritable()).thenReturn(true); socketSelector.preSelect(); - verify(writeContext).queueWriteOperations(writeOperation); + verify(channelContext).queueWriteOperation(writeOperation); assertTrue((selectionKey.interestOps() & SelectionKey.OP_WRITE) != 0); } public void testQueueDirectlyInChannelBufferSuccessful() throws Exception { - WriteOperation writeOperation = new WriteOperation(channel, buffers, listener); + BytesWriteOperation writeOperation = new BytesWriteOperation(channel, buffers, listener); assertTrue((selectionKey.interestOps() & SelectionKey.OP_WRITE) == 0); - when(channel.isWritable()).thenReturn(true); socketSelector.queueWriteInChannelBuffer(writeOperation); - verify(writeContext).queueWriteOperations(writeOperation); + verify(channelContext).queueWriteOperation(writeOperation); assertTrue((selectionKey.interestOps() & SelectionKey.OP_WRITE) != 0); } public void testQueueDirectlyInChannelBufferSelectionKeyThrowsException() throws Exception { SelectionKey selectionKey = mock(SelectionKey.class); - WriteOperation writeOperation = new WriteOperation(channel, buffers, listener); + BytesWriteOperation writeOperation = new BytesWriteOperation(channel, buffers, listener); CancelledKeyException cancelledKeyException = new CancelledKeyException(); - when(channel.isWritable()).thenReturn(true); when(channel.getSelectionKey()).thenReturn(selectionKey); when(selectionKey.interestOps(anyInt())).thenThrow(cancelledKeyException); socketSelector.queueWriteInChannelBuffer(writeOperation); - verify(writeContext, times(0)).queueWriteOperations(writeOperation); + verify(channelContext, times(0)).queueWriteOperation(writeOperation); 
verify(listener).accept(null, cancelledKeyException); } @@ -285,6 +281,16 @@ public void testReadEventWithException() throws Exception { verify(eventHandler).readException(channel, ioException); } + public void testWillCallPostHandleAfterChannelHandling() throws Exception { + selectionKey.setReadyOps(SelectionKey.OP_WRITE | SelectionKey.OP_READ); + + socketSelector.processKey(selectionKey); + + verify(eventHandler).handleWrite(channel); + verify(eventHandler).handleRead(channel); + verify(eventHandler).postHandling(channel); + } + public void testCleanup() throws Exception { NioSocketChannel unRegisteredChannel = mock(NioSocketChannel.class); @@ -292,7 +298,7 @@ public void testCleanup() throws Exception { socketSelector.preSelect(); - socketSelector.queueWrite(new WriteOperation(mock(NioSocketChannel.class), buffers, listener)); + socketSelector.queueWrite(new BytesWriteOperation(mock(NioSocketChannel.class), buffers, listener)); socketSelector.scheduleForRegistration(unRegisteredChannel); TestSelectionKey testSelectionKey = new TestSelectionKey(0); diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/WriteOperationTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/WriteOperationTests.java index da74269b8253a..59fb9cde4389c 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/WriteOperationTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/WriteOperationTests.java @@ -45,71 +45,58 @@ public void setFields() { } - public void testFlush() throws IOException { + public void testFullyFlushedMarker() { ByteBuffer[] buffers = {ByteBuffer.allocate(10)}; - WriteOperation writeOp = new WriteOperation(channel, buffers, listener); + BytesWriteOperation writeOp = new BytesWriteOperation(channel, buffers, listener); - - when(channel.write(any(ByteBuffer[].class))).thenReturn(10); - - writeOp.flush(); + writeOp.incrementIndex(10); assertTrue(writeOp.isFullyFlushed()); } - public void testPartialFlush() throws IOException { + public void testPartiallyFlushedMarker() { ByteBuffer[] buffers = {ByteBuffer.allocate(10)}; - WriteOperation writeOp = new WriteOperation(channel, buffers, listener); - - when(channel.write(any(ByteBuffer[].class))).thenReturn(5); + BytesWriteOperation writeOp = new BytesWriteOperation(channel, buffers, listener); - writeOp.flush(); + writeOp.incrementIndex(5); assertFalse(writeOp.isFullyFlushed()); } public void testMultipleFlushesWithCompositeBuffer() throws IOException { ByteBuffer[] buffers = {ByteBuffer.allocate(10), ByteBuffer.allocate(15), ByteBuffer.allocate(3)}; - WriteOperation writeOp = new WriteOperation(channel, buffers, listener); + BytesWriteOperation writeOp = new BytesWriteOperation(channel, buffers, listener); ArgumentCaptor buffersCaptor = ArgumentCaptor.forClass(ByteBuffer[].class); - when(channel.write(buffersCaptor.capture())).thenReturn(5) - .thenReturn(5) - .thenReturn(2) - .thenReturn(15) - .thenReturn(1); - - writeOp.flush(); - assertFalse(writeOp.isFullyFlushed()); - writeOp.flush(); + writeOp.incrementIndex(5); assertFalse(writeOp.isFullyFlushed()); - writeOp.flush(); - assertFalse(writeOp.isFullyFlushed()); - writeOp.flush(); - assertFalse(writeOp.isFullyFlushed()); - writeOp.flush(); - assertTrue(writeOp.isFullyFlushed()); - - List values = buffersCaptor.getAllValues(); - ByteBuffer[] byteBuffers = values.get(0); - assertEquals(3, byteBuffers.length); - assertEquals(10, byteBuffers[0].remaining()); - - byteBuffers = values.get(1); + ByteBuffer[] byteBuffers = 
writeOp.getBuffersToWrite(); assertEquals(3, byteBuffers.length); assertEquals(5, byteBuffers[0].remaining()); - byteBuffers = values.get(2); + writeOp.incrementIndex(5); + assertFalse(writeOp.isFullyFlushed()); + byteBuffers = writeOp.getBuffersToWrite(); assertEquals(2, byteBuffers.length); assertEquals(15, byteBuffers[0].remaining()); - byteBuffers = values.get(3); + writeOp.incrementIndex(2); + assertFalse(writeOp.isFullyFlushed()); + byteBuffers = writeOp.getBuffersToWrite(); assertEquals(2, byteBuffers.length); assertEquals(13, byteBuffers[0].remaining()); - byteBuffers = values.get(4); + writeOp.incrementIndex(15); + assertFalse(writeOp.isFullyFlushed()); + byteBuffers = writeOp.getBuffersToWrite(); assertEquals(1, byteBuffers.length); assertEquals(1, byteBuffers[0].remaining()); + + writeOp.incrementIndex(1); + assertTrue(writeOp.isFullyFlushed()); + byteBuffers = writeOp.getBuffersToWrite(); + assertEquals(1, byteBuffers.length); + assertEquals(0, byteBuffers[0].remaining()); } } diff --git a/modules/build.gradle b/modules/build.gradle index b3dbde24936e9..7f7e7e0965bc9 100644 --- a/modules/build.gradle +++ b/modules/build.gradle @@ -17,7 +17,7 @@ * under the License. */ -subprojects { +configure(subprojects.findAll { it.parent.path == project.path }) { group = 'org.elasticsearch.plugin' // for modules which publish client jars apply plugin: 'elasticsearch.esplugin' diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomReflectionObjectHandler.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomReflectionObjectHandler.java index eef9d7af8dd01..79319369489fd 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomReflectionObjectHandler.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomReflectionObjectHandler.java @@ -20,6 +20,7 @@ package org.elasticsearch.script.mustache; import com.github.mustachejava.reflect.ReflectionObjectHandler; +import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.util.iterable.Iterables; import java.lang.reflect.Array; @@ -154,4 +155,9 @@ public Iterator iterator() { } } + @Override + public String stringify(Object object) { + CollectionUtils.ensureNoSelfReferences(object); + return super.stringify(object); + } } diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index 0bd96725c66b4..d287d7ee02378 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -31,6 +31,7 @@ integTestCluster { dependencies { compile 'org.antlr:antlr4-runtime:4.5.3' compile 'org.ow2.asm:asm-debug-all:5.1' + compile project('spi') } dependencyLicenses { diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteContext.java b/modules/lang-painless/spi/build.gradle similarity index 66% rename from libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteContext.java rename to modules/lang-painless/spi/build.gradle index 39e69e8f9a94e..7e43a242a23a9 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteContext.java +++ b/modules/lang-painless/spi/build.gradle @@ -17,21 +17,24 @@ * under the License. 
*/ -package org.elasticsearch.nio; - -import java.io.IOException; -import java.util.function.BiConsumer; - -public interface WriteContext { - - void sendMessage(Object message, BiConsumer listener); - - void queueWriteOperations(WriteOperation writeOperation); - - void flushChannel() throws IOException; - - boolean hasQueuedWriteOps(); - - void clearQueuedWriteOps(Exception e); +apply plugin: 'elasticsearch.build' +apply plugin: 'nebula.maven-base-publish' +apply plugin: 'nebula.maven-scm' + +group = 'org.elasticsearch.plugin' +archivesBaseName = 'elasticsearch-scripting-painless-spi' + +publishing { + publications { + nebula { + artifactId = archivesBaseName + } + } +} +dependencies { + compile "org.elasticsearch:elasticsearch:${version}" } + +// no tests...yet? +test.enabled = false diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/PainlessExtension.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/PainlessExtension.java similarity index 100% rename from modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/PainlessExtension.java rename to modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/PainlessExtension.java diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/Whitelist.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java similarity index 100% rename from modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/Whitelist.java rename to modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java similarity index 100% rename from modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java rename to modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerCaster.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerCaster.java index 89f358d17e0c0..abba62de39c19 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerCaster.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerCaster.java @@ -20,7 +20,7 @@ package org.elasticsearch.painless; import org.elasticsearch.painless.Definition.Cast; -import org.elasticsearch.painless.Definition.Type; +import org.elasticsearch.painless.Definition.def; import java.util.Objects; @@ -30,453 +30,448 @@ */ public final class AnalyzerCaster { - private Definition definition; - - public AnalyzerCaster(Definition definition) { - this.definition = definition; - } - - public Cast getLegalCast(Location location, Type actual, Type expected, boolean explicit, boolean internal) { + public static Cast getLegalCast(Location location, Class actual, Class expected, boolean explicit, boolean internal) { Objects.requireNonNull(actual); Objects.requireNonNull(expected); - if (actual.equals(expected)) { + if (actual == expected) { return null; } - if (actual.dynamic) { - if (expected.clazz == boolean.class) { - return Cast.unboxTo(definition.DefType, definition.BooleanType, explicit, definition.booleanType); - } else if (expected.clazz == byte.class) { - return Cast.unboxTo(definition.DefType, definition.ByteType, explicit, definition.byteType); - } else if 
(expected.clazz == short.class) { - return Cast.unboxTo(definition.DefType, definition.ShortType, explicit, definition.shortType); - } else if (expected.clazz == char.class) { - return Cast.unboxTo(definition.DefType, definition.CharacterType, explicit, definition.charType); - } else if (expected.clazz == int.class) { - return Cast.unboxTo(definition.DefType, definition.IntegerType, explicit, definition.intType); - } else if (expected.clazz == long.class) { - return Cast.unboxTo(definition.DefType, definition.LongType, explicit, definition.longType); - } else if (expected.clazz == float.class) { - return Cast.unboxTo(definition.DefType, definition.FloatType, explicit, definition.floatType); - } else if (expected.clazz == double.class) { - return Cast.unboxTo(definition.DefType, definition.DoubleType, explicit, definition.doubleType); + if (actual == def.class) { + if (expected == boolean.class) { + return Cast.unboxTo(def.class, Boolean.class, explicit, boolean.class); + } else if (expected == byte.class) { + return Cast.unboxTo(def.class, Byte.class, explicit, byte.class); + } else if (expected == short.class) { + return Cast.unboxTo(def.class, Short.class, explicit, short.class); + } else if (expected == char.class) { + return Cast.unboxTo(def.class, Character.class, explicit, char.class); + } else if (expected == int.class) { + return Cast.unboxTo(def.class, Integer.class, explicit, int.class); + } else if (expected == long.class) { + return Cast.unboxTo(def.class, Long.class, explicit, long.class); + } else if (expected == float.class) { + return Cast.unboxTo(def.class, Float.class, explicit, float.class); + } else if (expected == double.class) { + return Cast.unboxTo(def.class, Double.class, explicit, double.class); } - } else if (actual.clazz == Object.class) { - if (expected.clazz == byte.class && explicit && internal) { - return Cast.unboxTo(definition.ObjectType, definition.ByteType, true, definition.byteType); - } else if (expected.clazz == short.class && explicit && internal) { - return Cast.unboxTo(definition.ObjectType, definition.ShortType, true, definition.shortType); - } else if (expected.clazz == char.class && explicit && internal) { - return Cast.unboxTo(definition.ObjectType, definition.CharacterType, true, definition.charType); - } else if (expected.clazz == int.class && explicit && internal) { - return Cast.unboxTo(definition.ObjectType, definition.IntegerType, true, definition.intType); - } else if (expected.clazz == long.class && explicit && internal) { - return Cast.unboxTo(definition.ObjectType, definition.LongType, true, definition.longType); - } else if (expected.clazz == float.class && explicit && internal) { - return Cast.unboxTo(definition.ObjectType, definition.FloatType, true, definition.floatType); - } else if (expected.clazz == double.class && explicit && internal) { - return Cast.unboxTo(definition.ObjectType, definition.DoubleType, true, definition.doubleType); + } else if (actual == Object.class) { + if (expected == byte.class && explicit && internal) { + return Cast.unboxTo(Object.class, Byte.class, true, byte.class); + } else if (expected == short.class && explicit && internal) { + return Cast.unboxTo(Object.class, Short.class, true, short.class); + } else if (expected == char.class && explicit && internal) { + return Cast.unboxTo(Object.class, Character.class, true, char.class); + } else if (expected == int.class && explicit && internal) { + return Cast.unboxTo(Object.class, Integer.class, true, int.class); + } else if (expected == long.class && 
explicit && internal) { + return Cast.unboxTo(Object.class, Long.class, true, long.class); + } else if (expected == float.class && explicit && internal) { + return Cast.unboxTo(Object.class, Float.class, true, float.class); + } else if (expected == double.class && explicit && internal) { + return Cast.unboxTo(Object.class, Double.class, true, double.class); } - } else if (actual.clazz == Number.class) { - if (expected.clazz == byte.class && explicit && internal) { - return Cast.unboxTo(definition.NumberType, definition.ByteType, true, definition.byteType); - } else if (expected.clazz == short.class && explicit && internal) { - return Cast.unboxTo(definition.NumberType, definition.ShortType, true, definition.shortType); - } else if (expected.clazz == char.class && explicit && internal) { - return Cast.unboxTo(definition.NumberType, definition.CharacterType, true, definition.charType); - } else if (expected.clazz == int.class && explicit && internal) { - return Cast.unboxTo(definition.NumberType, definition.IntegerType, true, definition.intType); - } else if (expected.clazz == long.class && explicit && internal) { - return Cast.unboxTo(definition.NumberType, definition.LongType, true, definition.longType); - } else if (expected.clazz == float.class && explicit && internal) { - return Cast.unboxTo(definition.NumberType, definition.FloatType, true, definition.floatType); - } else if (expected.clazz == double.class && explicit && internal) { - return Cast.unboxTo(definition.NumberType, definition.DoubleType, true, definition.doubleType); + } else if (actual == Number.class) { + if (expected == byte.class && explicit && internal) { + return Cast.unboxTo(Number.class, Byte.class, true, byte.class); + } else if (expected == short.class && explicit && internal) { + return Cast.unboxTo(Number.class, Short.class, true, short.class); + } else if (expected == char.class && explicit && internal) { + return Cast.unboxTo(Number.class, Character.class, true, char.class); + } else if (expected == int.class && explicit && internal) { + return Cast.unboxTo(Number.class, Integer.class, true, int.class); + } else if (expected == long.class && explicit && internal) { + return Cast.unboxTo(Number.class, Long.class, true, long.class); + } else if (expected == float.class && explicit && internal) { + return Cast.unboxTo(Number.class, Float.class, true, float.class); + } else if (expected == double.class && explicit && internal) { + return Cast.unboxTo(Number.class, Double.class, true, double.class); } - } else if (actual.clazz == String.class) { - if (expected.clazz == char.class && explicit) { - return Cast.standard(definition.StringType, definition.charType, true); + } else if (actual == String.class) { + if (expected == char.class && explicit) { + return Cast.standard(String.class, char.class, true); } - } else if (actual.clazz == boolean.class) { - if (expected.dynamic) { - return Cast.boxFrom(definition.BooleanType, definition.DefType, explicit, definition.booleanType); - } else if (expected.clazz == Object.class && internal) { - return Cast.boxFrom(definition.BooleanType, definition.ObjectType, explicit, definition.booleanType); - } else if (expected.clazz == Boolean.class && internal) { - return Cast.boxTo(definition.booleanType, definition.booleanType, explicit, definition.booleanType); + } else if (actual == boolean.class) { + if (expected == def.class) { + return Cast.boxFrom(Boolean.class, def.class, explicit, boolean.class); + } else if (expected == Object.class && internal) { + return 
Cast.boxFrom(Boolean.class, Object.class, explicit, boolean.class); + } else if (expected == Boolean.class && internal) { + return Cast.boxTo(boolean.class, boolean.class, explicit, boolean.class); } - } else if (actual.clazz == byte.class) { - if (expected.dynamic) { - return Cast.boxFrom(definition.ByteType, definition.DefType, explicit, definition.byteType); - } else if (expected.clazz == Object.class && internal) { - return Cast.boxFrom(definition.ByteType, definition.ObjectType, explicit, definition.byteType); - } else if (expected.clazz == Number.class && internal) { - return Cast.boxFrom(definition.ByteType, definition.NumberType, explicit, definition.byteType); - } else if (expected.clazz == short.class) { - return Cast.standard(definition.byteType, definition.shortType, explicit); - } else if (expected.clazz == char.class && explicit) { - return Cast.standard(definition.byteType, definition.charType, true); - } else if (expected.clazz == int.class) { - return Cast.standard(definition.byteType, definition.intType, explicit); - } else if (expected.clazz == long.class) { - return Cast.standard(definition.byteType, definition.longType, explicit); - } else if (expected.clazz == float.class) { - return Cast.standard(definition.byteType, definition.floatType, explicit); - } else if (expected.clazz == double.class) { - return Cast.standard(definition.byteType, definition.doubleType, explicit); - } else if (expected.clazz == Byte.class && internal) { - return Cast.boxTo(definition.byteType, definition.byteType, explicit, definition.byteType); - } else if (expected.clazz == Short.class && internal) { - return Cast.boxTo(definition.byteType, definition.shortType, explicit, definition.shortType); - } else if (expected.clazz == Character.class && explicit && internal) { - return Cast.boxTo(definition.byteType, definition.charType, true, definition.charType); - } else if (expected.clazz == Integer.class && internal) { - return Cast.boxTo(definition.byteType, definition.intType, explicit, definition.intType); - } else if (expected.clazz == Long.class && internal) { - return Cast.boxTo(definition.byteType, definition.longType, explicit, definition.longType); - } else if (expected.clazz == Float.class && internal) { - return Cast.boxTo(definition.byteType, definition.floatType, explicit, definition.floatType); - } else if (expected.clazz == Double.class && internal) { - return Cast.boxTo(definition.byteType, definition.doubleType, explicit, definition.doubleType); + } else if (actual == byte.class) { + if (expected == def.class) { + return Cast.boxFrom(Byte.class, def.class, explicit, byte.class); + } else if (expected == Object.class && internal) { + return Cast.boxFrom(Byte.class, Object.class, explicit, byte.class); + } else if (expected == Number.class && internal) { + return Cast.boxFrom(Byte.class, Number.class, explicit, byte.class); + } else if (expected == short.class) { + return Cast.standard(byte.class, short.class, explicit); + } else if (expected == char.class && explicit) { + return Cast.standard(byte.class, char.class, true); + } else if (expected == int.class) { + return Cast.standard(byte.class, int.class, explicit); + } else if (expected == long.class) { + return Cast.standard(byte.class, long.class, explicit); + } else if (expected == float.class) { + return Cast.standard(byte.class, float.class, explicit); + } else if (expected == double.class) { + return Cast.standard(byte.class, double.class, explicit); + } else if (expected == Byte.class && internal) { + return 
Cast.boxTo(byte.class, byte.class, explicit, byte.class); + } else if (expected == Short.class && internal) { + return Cast.boxTo(byte.class, short.class, explicit, short.class); + } else if (expected == Character.class && explicit && internal) { + return Cast.boxTo(byte.class, char.class, true, char.class); + } else if (expected == Integer.class && internal) { + return Cast.boxTo(byte.class, int.class, explicit, int.class); + } else if (expected == Long.class && internal) { + return Cast.boxTo(byte.class, long.class, explicit, long.class); + } else if (expected == Float.class && internal) { + return Cast.boxTo(byte.class, float.class, explicit, float.class); + } else if (expected == Double.class && internal) { + return Cast.boxTo(byte.class, double.class, explicit, double.class); } - } else if (actual.clazz == short.class) { - if (expected.dynamic) { - return Cast.boxFrom(definition.ShortType, definition.DefType, explicit, definition.shortType); - } else if (expected.clazz == Object.class && internal) { - return Cast.boxFrom(definition.ShortType, definition.ObjectType, explicit, definition.shortType); - } else if (expected.clazz == Number.class && internal) { - return Cast.boxFrom(definition.ShortType, definition.NumberType, explicit, definition.shortType); - } else if (expected.clazz == byte.class && explicit) { - return Cast.standard(definition.shortType, definition.byteType, true); - } else if (expected.clazz == char.class && explicit) { - return Cast.standard(definition.shortType, definition.charType, true); - } else if (expected.clazz == int.class) { - return Cast.standard(definition.shortType, definition.intType, explicit); - } else if (expected.clazz == long.class) { - return Cast.standard(definition.shortType, definition.longType, explicit); - } else if (expected.clazz == float.class) { - return Cast.standard(definition.shortType, definition.floatType, explicit); - } else if (expected.clazz == double.class) { - return Cast.standard(definition.shortType, definition.doubleType, explicit); - } else if (expected.clazz == Byte.class && explicit && internal) { - return Cast.boxTo(definition.shortType, definition.byteType, true, definition.byteType); - } else if (expected.clazz == Short.class && internal) { - return Cast.boxTo(definition.shortType, definition.shortType, explicit, definition.shortType); - } else if (expected.clazz == Character.class && explicit && internal) { - return Cast.boxTo(definition.shortType, definition.charType, true, definition.charType); - } else if (expected.clazz == Integer.class && internal) { - return Cast.boxTo(definition.shortType, definition.intType, explicit, definition.intType); - } else if (expected.clazz == Long.class && internal) { - return Cast.boxTo(definition.shortType, definition.longType, explicit, definition.longType); - } else if (expected.clazz == Float.class && internal) { - return Cast.boxTo(definition.shortType, definition.floatType, explicit, definition.floatType); - } else if (expected.clazz == Double.class && internal) { - return Cast.boxTo(definition.shortType, definition.doubleType, explicit, definition.doubleType); + } else if (actual == short.class) { + if (expected == def.class) { + return Cast.boxFrom(Short.class, def.class, explicit, short.class); + } else if (expected == Object.class && internal) { + return Cast.boxFrom(Short.class, Object.class, explicit, short.class); + } else if (expected == Number.class && internal) { + return Cast.boxFrom(Short.class, Number.class, explicit, short.class); + } else if (expected == 
byte.class && explicit) { + return Cast.standard(short.class, byte.class, true); + } else if (expected == char.class && explicit) { + return Cast.standard(short.class, char.class, true); + } else if (expected == int.class) { + return Cast.standard(short.class, int.class, explicit); + } else if (expected == long.class) { + return Cast.standard(short.class, long.class, explicit); + } else if (expected == float.class) { + return Cast.standard(short.class, float.class, explicit); + } else if (expected == double.class) { + return Cast.standard(short.class, double.class, explicit); + } else if (expected == Byte.class && explicit && internal) { + return Cast.boxTo(short.class, byte.class, true, byte.class); + } else if (expected == Short.class && internal) { + return Cast.boxTo(short.class, short.class, explicit, short.class); + } else if (expected == Character.class && explicit && internal) { + return Cast.boxTo(short.class, char.class, true, char.class); + } else if (expected == Integer.class && internal) { + return Cast.boxTo(short.class, int.class, explicit, int.class); + } else if (expected == Long.class && internal) { + return Cast.boxTo(short.class, long.class, explicit, long.class); + } else if (expected == Float.class && internal) { + return Cast.boxTo(short.class, float.class, explicit, float.class); + } else if (expected == Double.class && internal) { + return Cast.boxTo(short.class, double.class, explicit, double.class); } - } else if (actual.clazz == char.class) { - if (expected.dynamic) { - return Cast.boxFrom(definition.CharacterType, definition.DefType, explicit, definition.charType); - } else if (expected.clazz == Object.class && internal) { - return Cast.boxFrom(definition.CharacterType, definition.ObjectType, explicit, definition.charType); - } else if (expected.clazz == Number.class && internal) { - return Cast.boxFrom(definition.CharacterType, definition.NumberType, explicit, definition.charType); - } else if (expected.clazz == String.class) { - return Cast.standard(definition.charType, definition.StringType, explicit); - } else if (expected.clazz == byte.class && explicit) { - return Cast.standard(definition.charType, definition.byteType, true); - } else if (expected.clazz == short.class && explicit) { - return Cast.standard(definition.charType, definition.shortType, true); - } else if (expected.clazz == int.class) { - return Cast.standard(definition.charType, definition.intType, explicit); - } else if (expected.clazz == long.class) { - return Cast.standard(definition.charType, definition.longType, explicit); - } else if (expected.clazz == float.class) { - return Cast.standard(definition.charType, definition.floatType, explicit); - } else if (expected.clazz == double.class) { - return Cast.standard(definition.charType, definition.doubleType, explicit); - } else if (expected.clazz == Byte.class && explicit && internal) { - return Cast.boxTo(definition.charType, definition.byteType, true, definition.byteType); - } else if (expected.clazz == Short.class && internal) { - return Cast.boxTo(definition.charType, definition.shortType, explicit, definition.shortType); - } else if (expected.clazz == Character.class && internal) { - return Cast.boxTo(definition.charType, definition.charType, true, definition.charType); - } else if (expected.clazz == Integer.class && internal) { - return Cast.boxTo(definition.charType, definition.intType, explicit, definition.intType); - } else if (expected.clazz == Long.class && internal) { - return Cast.boxTo(definition.charType, definition.longType, 
explicit, definition.longType); - } else if (expected.clazz == Float.class && internal) { - return Cast.boxTo(definition.charType, definition.floatType, explicit, definition.floatType); - } else if (expected.clazz == Double.class && internal) { - return Cast.boxTo(definition.charType, definition.doubleType, explicit, definition.doubleType); + } else if (actual == char.class) { + if (expected == def.class) { + return Cast.boxFrom(Character.class, def.class, explicit, char.class); + } else if (expected == Object.class && internal) { + return Cast.boxFrom(Character.class, Object.class, explicit, char.class); + } else if (expected == Number.class && internal) { + return Cast.boxFrom(Character.class, Number.class, explicit, char.class); + } else if (expected == String.class) { + return Cast.standard(char.class, String.class, explicit); + } else if (expected == byte.class && explicit) { + return Cast.standard(char.class, byte.class, true); + } else if (expected == short.class && explicit) { + return Cast.standard(char.class, short.class, true); + } else if (expected == int.class) { + return Cast.standard(char.class, int.class, explicit); + } else if (expected == long.class) { + return Cast.standard(char.class, long.class, explicit); + } else if (expected == float.class) { + return Cast.standard(char.class, float.class, explicit); + } else if (expected == double.class) { + return Cast.standard(char.class, double.class, explicit); + } else if (expected == Byte.class && explicit && internal) { + return Cast.boxTo(char.class, byte.class, true, byte.class); + } else if (expected == Short.class && internal) { + return Cast.boxTo(char.class, short.class, explicit, short.class); + } else if (expected == Character.class && internal) { + return Cast.boxTo(char.class, char.class, true, char.class); + } else if (expected == Integer.class && internal) { + return Cast.boxTo(char.class, int.class, explicit, int.class); + } else if (expected == Long.class && internal) { + return Cast.boxTo(char.class, long.class, explicit, long.class); + } else if (expected == Float.class && internal) { + return Cast.boxTo(char.class, float.class, explicit, float.class); + } else if (expected == Double.class && internal) { + return Cast.boxTo(char.class, double.class, explicit, double.class); } - } else if (actual.clazz == int.class) { - if (expected.dynamic) { - return Cast.boxFrom(definition.IntegerType, definition.DefType, explicit, definition.intType); - } else if (expected.clazz == Object.class && internal) { - return Cast.boxFrom(definition.IntegerType, definition.ObjectType, explicit, definition.intType); - } else if (expected.clazz == Number.class && internal) { - return Cast.boxFrom(definition.IntegerType, definition.NumberType, explicit, definition.intType); - } else if (expected.clazz == byte.class && explicit) { - return Cast.standard(definition.intType, definition.byteType, true); - } else if (expected.clazz == char.class && explicit) { - return Cast.standard(definition.intType, definition.charType, true); - } else if (expected.clazz == short.class && explicit) { - return Cast.standard(definition.intType, definition.shortType, true); - } else if (expected.clazz == long.class) { - return Cast.standard(definition.intType, definition.longType, explicit); - } else if (expected.clazz == float.class) { - return Cast.standard(definition.intType, definition.floatType, explicit); - } else if (expected.clazz == double.class) { - return Cast.standard(definition.intType, definition.doubleType, explicit); - } else if 
(expected.clazz == Byte.class && explicit && internal) { - return Cast.boxTo(definition.intType, definition.byteType, true, definition.byteType); - } else if (expected.clazz == Short.class && explicit && internal) { - return Cast.boxTo(definition.intType, definition.shortType, true, definition.shortType); - } else if (expected.clazz == Character.class && explicit && internal) { - return Cast.boxTo(definition.intType, definition.charType, true, definition.charType); - } else if (expected.clazz == Integer.class && internal) { - return Cast.boxTo(definition.intType, definition.intType, explicit, definition.intType); - } else if (expected.clazz == Long.class && internal) { - return Cast.boxTo(definition.intType, definition.longType, explicit, definition.longType); - } else if (expected.clazz == Float.class && internal) { - return Cast.boxTo(definition.intType, definition.floatType, explicit, definition.floatType); - } else if (expected.clazz == Double.class && internal) { - return Cast.boxTo(definition.intType, definition.doubleType, explicit, definition.doubleType); + } else if (actual == int.class) { + if (expected == def.class) { + return Cast.boxFrom(Integer.class, def.class, explicit, int.class); + } else if (expected == Object.class && internal) { + return Cast.boxFrom(Integer.class, Object.class, explicit, int.class); + } else if (expected == Number.class && internal) { + return Cast.boxFrom(Integer.class, Number.class, explicit, int.class); + } else if (expected == byte.class && explicit) { + return Cast.standard(int.class, byte.class, true); + } else if (expected == char.class && explicit) { + return Cast.standard(int.class, char.class, true); + } else if (expected == short.class && explicit) { + return Cast.standard(int.class, short.class, true); + } else if (expected == long.class) { + return Cast.standard(int.class, long.class, explicit); + } else if (expected == float.class) { + return Cast.standard(int.class, float.class, explicit); + } else if (expected == double.class) { + return Cast.standard(int.class, double.class, explicit); + } else if (expected == Byte.class && explicit && internal) { + return Cast.boxTo(int.class, byte.class, true, byte.class); + } else if (expected == Short.class && explicit && internal) { + return Cast.boxTo(int.class, short.class, true, short.class); + } else if (expected == Character.class && explicit && internal) { + return Cast.boxTo(int.class, char.class, true, char.class); + } else if (expected == Integer.class && internal) { + return Cast.boxTo(int.class, int.class, explicit, int.class); + } else if (expected == Long.class && internal) { + return Cast.boxTo(int.class, long.class, explicit, long.class); + } else if (expected == Float.class && internal) { + return Cast.boxTo(int.class, float.class, explicit, float.class); + } else if (expected == Double.class && internal) { + return Cast.boxTo(int.class, double.class, explicit, double.class); } - } else if (actual.clazz == long.class) { - if (expected.dynamic) { - return Cast.boxFrom(definition.LongType, definition.DefType, explicit, definition.longType); - } else if (expected.clazz == Object.class && internal) { - return Cast.boxFrom(definition.LongType, definition.ObjectType, explicit, definition.longType); - } else if (expected.clazz == Number.class && internal) { - return Cast.boxFrom(definition.LongType, definition.NumberType, explicit, definition.longType); - } else if (expected.clazz == byte.class && explicit) { - return Cast.standard(definition.longType, definition.byteType, true); - } 
else if (expected.clazz == char.class && explicit) { - return Cast.standard(definition.longType, definition.charType, true); - } else if (expected.clazz == short.class && explicit) { - return Cast.standard(definition.longType, definition.shortType, true); - } else if (expected.clazz == int.class && explicit) { - return Cast.standard(definition.longType, definition.intType, true); - } else if (expected.clazz == float.class) { - return Cast.standard(definition.longType, definition.floatType, explicit); - } else if (expected.clazz == double.class) { - return Cast.standard(definition.longType, definition.doubleType, explicit); - } else if (expected.clazz == Byte.class && explicit && internal) { - return Cast.boxTo(definition.longType, definition.byteType, true, definition.byteType); - } else if (expected.clazz == Short.class && explicit && internal) { - return Cast.boxTo(definition.longType, definition.shortType, true, definition.shortType); - } else if (expected.clazz == Character.class && explicit && internal) { - return Cast.boxTo(definition.longType, definition.charType, true, definition.charType); - } else if (expected.clazz == Integer.class && explicit && internal) { - return Cast.boxTo(definition.longType, definition.intType, true, definition.intType); - } else if (expected.clazz == Long.class && internal) { - return Cast.boxTo(definition.longType, definition.longType, explicit, definition.longType); - } else if (expected.clazz == Float.class && internal) { - return Cast.boxTo(definition.longType, definition.floatType, explicit, definition.floatType); - } else if (expected.clazz == Double.class && internal) { - return Cast.boxTo(definition.longType, definition.doubleType, explicit, definition.doubleType); + } else if (actual == long.class) { + if (expected == def.class) { + return Cast.boxFrom(Long.class, def.class, explicit, long.class); + } else if (expected == Object.class && internal) { + return Cast.boxFrom(Long.class, Object.class, explicit, long.class); + } else if (expected == Number.class && internal) { + return Cast.boxFrom(Long.class, Number.class, explicit, long.class); + } else if (expected == byte.class && explicit) { + return Cast.standard(long.class, byte.class, true); + } else if (expected == char.class && explicit) { + return Cast.standard(long.class, char.class, true); + } else if (expected == short.class && explicit) { + return Cast.standard(long.class, short.class, true); + } else if (expected == int.class && explicit) { + return Cast.standard(long.class, int.class, true); + } else if (expected == float.class) { + return Cast.standard(long.class, float.class, explicit); + } else if (expected == double.class) { + return Cast.standard(long.class, double.class, explicit); + } else if (expected == Byte.class && explicit && internal) { + return Cast.boxTo(long.class, byte.class, true, byte.class); + } else if (expected == Short.class && explicit && internal) { + return Cast.boxTo(long.class, short.class, true, short.class); + } else if (expected == Character.class && explicit && internal) { + return Cast.boxTo(long.class, char.class, true, char.class); + } else if (expected == Integer.class && explicit && internal) { + return Cast.boxTo(long.class, int.class, true, int.class); + } else if (expected == Long.class && internal) { + return Cast.boxTo(long.class, long.class, explicit, long.class); + } else if (expected == Float.class && internal) { + return Cast.boxTo(long.class, float.class, explicit, float.class); + } else if (expected == Double.class && internal) { + 
return Cast.boxTo(long.class, double.class, explicit, double.class); } - } else if (actual.clazz == float.class) { - if (expected.dynamic) { - return Cast.boxFrom(definition.FloatType, definition.DefType, explicit, definition.floatType); - } else if (expected.clazz == Object.class && internal) { - return Cast.boxFrom(definition.FloatType, definition.ObjectType, explicit, definition.floatType); - } else if (expected.clazz == Number.class && internal) { - return Cast.boxFrom(definition.FloatType, definition.NumberType, explicit, definition.floatType); - } else if (expected.clazz == byte.class && explicit) { - return Cast.standard(definition.floatType, definition.byteType, true); - } else if (expected.clazz == char.class && explicit) { - return Cast.standard(definition.floatType, definition.charType, true); - } else if (expected.clazz == short.class && explicit) { - return Cast.standard(definition.floatType, definition.shortType, true); - } else if (expected.clazz == int.class && explicit) { - return Cast.standard(definition.floatType, definition.intType, true); - } else if (expected.clazz == long.class && explicit) { - return Cast.standard(definition.floatType, definition.longType, true); - } else if (expected.clazz == double.class) { - return Cast.standard(definition.floatType, definition.doubleType, explicit); - } else if (expected.clazz == Byte.class && explicit && internal) { - return Cast.boxTo(definition.floatType, definition.byteType, true, definition.byteType); - } else if (expected.clazz == Short.class && explicit && internal) { - return Cast.boxTo(definition.floatType, definition.shortType, true, definition.shortType); - } else if (expected.clazz == Character.class && explicit && internal) { - return Cast.boxTo(definition.floatType, definition.charType, true, definition.charType); - } else if (expected.clazz == Integer.class && explicit && internal) { - return Cast.boxTo(definition.floatType, definition.intType, true, definition.intType); - } else if (expected.clazz == Long.class && explicit && internal) { - return Cast.boxTo(definition.floatType, definition.longType, true, definition.longType); - } else if (expected.clazz == Float.class && internal) { - return Cast.boxTo(definition.floatType, definition.floatType, explicit, definition.floatType); - } else if (expected.clazz == Double.class && internal) { - return Cast.boxTo(definition.floatType, definition.doubleType, explicit, definition.doubleType); + } else if (actual == float.class) { + if (expected == def.class) { + return Cast.boxFrom(Float.class, def.class, explicit, float.class); + } else if (expected == Object.class && internal) { + return Cast.boxFrom(Float.class, Object.class, explicit, float.class); + } else if (expected == Number.class && internal) { + return Cast.boxFrom(Float.class, Number.class, explicit, float.class); + } else if (expected == byte.class && explicit) { + return Cast.standard(float.class, byte.class, true); + } else if (expected == char.class && explicit) { + return Cast.standard(float.class, char.class, true); + } else if (expected == short.class && explicit) { + return Cast.standard(float.class, short.class, true); + } else if (expected == int.class && explicit) { + return Cast.standard(float.class, int.class, true); + } else if (expected == long.class && explicit) { + return Cast.standard(float.class, long.class, true); + } else if (expected == double.class) { + return Cast.standard(float.class, double.class, explicit); + } else if (expected == Byte.class && explicit && internal) { + return 
Cast.boxTo(float.class, byte.class, true, byte.class); + } else if (expected == Short.class && explicit && internal) { + return Cast.boxTo(float.class, short.class, true, short.class); + } else if (expected == Character.class && explicit && internal) { + return Cast.boxTo(float.class, char.class, true, char.class); + } else if (expected == Integer.class && explicit && internal) { + return Cast.boxTo(float.class, int.class, true, int.class); + } else if (expected == Long.class && explicit && internal) { + return Cast.boxTo(float.class, long.class, true, long.class); + } else if (expected == Float.class && internal) { + return Cast.boxTo(float.class, float.class, explicit, float.class); + } else if (expected == Double.class && internal) { + return Cast.boxTo(float.class, double.class, explicit, double.class); } - } else if (actual.clazz == double.class) { - if (expected.dynamic) { - return Cast.boxFrom(definition.DoubleType, definition.DefType, explicit, definition.doubleType); - } else if (expected.clazz == Object.class && internal) { - return Cast.boxFrom(definition.DoubleType, definition.ObjectType, explicit, definition.doubleType); - } else if (expected.clazz == Number.class && internal) { - return Cast.boxFrom(definition.DoubleType, definition.NumberType, explicit, definition.doubleType); - } else if (expected.clazz == byte.class && explicit) { - return Cast.standard(definition.doubleType, definition.byteType, true); - } else if (expected.clazz == char.class && explicit) { - return Cast.standard(definition.doubleType, definition.charType, true); - } else if (expected.clazz == short.class && explicit) { - return Cast.standard(definition.doubleType, definition.shortType, true); - } else if (expected.clazz == int.class && explicit) { - return Cast.standard(definition.doubleType, definition.intType, true); - } else if (expected.clazz == long.class && explicit) { - return Cast.standard(definition.doubleType, definition.longType, true); - } else if (expected.clazz == float.class && explicit) { - return Cast.standard(definition.doubleType, definition.floatType, true); - } else if (expected.clazz == Byte.class && explicit && internal) { - return Cast.boxTo(definition.doubleType, definition.byteType, true, definition.byteType); - } else if (expected.clazz == Short.class && explicit && internal) { - return Cast.boxTo(definition.doubleType, definition.shortType, true, definition.shortType); - } else if (expected.clazz == Character.class && explicit && internal) { - return Cast.boxTo(definition.doubleType, definition.charType, true, definition.charType); - } else if (expected.clazz == Integer.class && explicit && internal) { - return Cast.boxTo(definition.doubleType, definition.intType, true, definition.intType); - } else if (expected.clazz == Long.class && explicit && internal) { - return Cast.boxTo(definition.doubleType, definition.longType, true, definition.longType); - } else if (expected.clazz == Float.class && explicit && internal) { - return Cast.boxTo(definition.doubleType, definition.floatType, true, definition.floatType); - } else if (expected.clazz == Double.class && internal) { - return Cast.boxTo(definition.doubleType, definition.doubleType, explicit, definition.doubleType); + } else if (actual == double.class) { + if (expected == def.class) { + return Cast.boxFrom(Double.class, def.class, explicit, double.class); + } else if (expected == Object.class && internal) { + return Cast.boxFrom(Double.class, Object.class, explicit, double.class); + } else if (expected == Number.class && 
internal) { + return Cast.boxFrom(Double.class, Number.class, explicit, double.class); + } else if (expected == byte.class && explicit) { + return Cast.standard(double.class, byte.class, true); + } else if (expected == char.class && explicit) { + return Cast.standard(double.class, char.class, true); + } else if (expected == short.class && explicit) { + return Cast.standard(double.class, short.class, true); + } else if (expected == int.class && explicit) { + return Cast.standard(double.class, int.class, true); + } else if (expected == long.class && explicit) { + return Cast.standard(double.class, long.class, true); + } else if (expected == float.class && explicit) { + return Cast.standard(double.class, float.class, true); + } else if (expected == Byte.class && explicit && internal) { + return Cast.boxTo(double.class, byte.class, true, byte.class); + } else if (expected == Short.class && explicit && internal) { + return Cast.boxTo(double.class, short.class, true, short.class); + } else if (expected == Character.class && explicit && internal) { + return Cast.boxTo(double.class, char.class, true, char.class); + } else if (expected == Integer.class && explicit && internal) { + return Cast.boxTo(double.class, int.class, true, int.class); + } else if (expected == Long.class && explicit && internal) { + return Cast.boxTo(double.class, long.class, true, long.class); + } else if (expected == Float.class && explicit && internal) { + return Cast.boxTo(double.class, float.class, true, float.class); + } else if (expected == Double.class && internal) { + return Cast.boxTo(double.class, double.class, explicit, double.class); } - } else if (actual.clazz == Boolean.class) { - if (expected.clazz == boolean.class && internal) { - return Cast.unboxFrom(definition.booleanType, definition.booleanType, explicit, definition.booleanType); + } else if (actual == Boolean.class) { + if (expected == boolean.class && internal) { + return Cast.unboxFrom(boolean.class, boolean.class, explicit, boolean.class); } - } else if (actual.clazz == Byte.class) { - if (expected.clazz == byte.class && internal) { - return Cast.unboxFrom(definition.byteType, definition.byteType, explicit, definition.byteType); - } else if (expected.clazz == short.class && internal) { - return Cast.unboxFrom(definition.byteType, definition.shortType, explicit, definition.byteType); - } else if (expected.clazz == char.class && explicit && internal) { - return Cast.unboxFrom(definition.byteType, definition.charType, true, definition.byteType); - } else if (expected.clazz == int.class && internal) { - return Cast.unboxFrom(definition.byteType, definition.intType, explicit, definition.byteType); - } else if (expected.clazz == long.class && internal) { - return Cast.unboxFrom(definition.byteType, definition.longType, explicit, definition.byteType); - } else if (expected.clazz == float.class && internal) { - return Cast.unboxFrom(definition.byteType, definition.floatType, explicit, definition.byteType); - } else if (expected.clazz == double.class && internal) { - return Cast.unboxFrom(definition.byteType, definition.doubleType, explicit, definition.byteType); + } else if (actual == Byte.class) { + if (expected == byte.class && internal) { + return Cast.unboxFrom(byte.class, byte.class, explicit, byte.class); + } else if (expected == short.class && internal) { + return Cast.unboxFrom(byte.class, short.class, explicit, byte.class); + } else if (expected == char.class && explicit && internal) { + return Cast.unboxFrom(byte.class, char.class, true, byte.class); 
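
The boxed-source branches above all follow one shape: Cast.unboxFrom records in its last argument the primitive the wrapper unboxes to, and MethodWriter then emits that unbox followed by an ordinary primitive conversion from the cast's from type to its to type. A minimal runtime-equivalent sketch in plain Java (illustrative values, not part of the patch):

    Byte boxed = Byte.valueOf((byte) 42);
    byte unboxed = boxed.byteValue(); // the unboxFrom step recorded in the cast's last argument
    int widened = unboxed;            // then the ordinary widening conversion from 'from' to 'to'
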
+ } else if (expected == int.class && internal) { + return Cast.unboxFrom(byte.class, int.class, explicit, byte.class); + } else if (expected == long.class && internal) { + return Cast.unboxFrom(byte.class, long.class, explicit, byte.class); + } else if (expected == float.class && internal) { + return Cast.unboxFrom(byte.class, float.class, explicit, byte.class); + } else if (expected == double.class && internal) { + return Cast.unboxFrom(byte.class, double.class, explicit, byte.class); } - } else if (actual.clazz == Short.class) { - if (expected.clazz == byte.class && explicit && internal) { - return Cast.unboxFrom(definition.shortType, definition.byteType, true, definition.shortType); - } else if (expected.clazz == short.class && internal) { - return Cast.unboxFrom(definition.shortType, definition.shortType, explicit, definition.shortType); - } else if (expected.clazz == char.class && explicit && internal) { - return Cast.unboxFrom(definition.shortType, definition.charType, true, definition.shortType); - } else if (expected.clazz == int.class && internal) { - return Cast.unboxFrom(definition.shortType, definition.intType, explicit, definition.shortType); - } else if (expected.clazz == long.class && internal) { - return Cast.unboxFrom(definition.shortType, definition.longType, explicit, definition.shortType); - } else if (expected.clazz == float.class && internal) { - return Cast.unboxFrom(definition.shortType, definition.floatType, explicit, definition.shortType); - } else if (expected.clazz == double.class && internal) { - return Cast.unboxFrom(definition.shortType, definition.doubleType, explicit, definition.shortType); + } else if (actual == Short.class) { + if (expected == byte.class && explicit && internal) { + return Cast.unboxFrom(short.class, byte.class, true, short.class); + } else if (expected == short.class && internal) { + return Cast.unboxFrom(short.class, short.class, explicit, short.class); + } else if (expected == char.class && explicit && internal) { + return Cast.unboxFrom(short.class, char.class, true, short.class); + } else if (expected == int.class && internal) { + return Cast.unboxFrom(short.class, int.class, explicit, short.class); + } else if (expected == long.class && internal) { + return Cast.unboxFrom(short.class, long.class, explicit, short.class); + } else if (expected == float.class && internal) { + return Cast.unboxFrom(short.class, float.class, explicit, short.class); + } else if (expected == double.class && internal) { + return Cast.unboxFrom(short.class, double.class, explicit, short.class); } - } else if (actual.clazz == Character.class) { - if (expected.clazz == byte.class && explicit && internal) { - return Cast.unboxFrom(definition.charType, definition.byteType, true, definition.charType); - } else if (expected.clazz == short.class && explicit && internal) { - return Cast.unboxFrom(definition.charType, definition.shortType, true, definition.charType); - } else if (expected.clazz == char.class && internal) { - return Cast.unboxFrom(definition.charType, definition.charType, explicit, definition.charType); - } else if (expected.clazz == int.class && internal) { - return Cast.unboxFrom(definition.charType, definition.intType, explicit, definition.charType); - } else if (expected.clazz == long.class && internal) { - return Cast.unboxFrom(definition.charType, definition.longType, explicit, definition.charType); - } else if (expected.clazz == float.class && internal) { - return Cast.unboxFrom(definition.charType, definition.floatType, explicit, 
definition.charType); - } else if (expected.clazz == double.class && internal) { - return Cast.unboxFrom(definition.charType, definition.doubleType, explicit, definition.charType); + } else if (actual == Character.class) { + if (expected == byte.class && explicit && internal) { + return Cast.unboxFrom(char.class, byte.class, true, char.class); + } else if (expected == short.class && explicit && internal) { + return Cast.unboxFrom(char.class, short.class, true, char.class); + } else if (expected == char.class && internal) { + return Cast.unboxFrom(char.class, char.class, explicit, char.class); + } else if (expected == int.class && internal) { + return Cast.unboxFrom(char.class, int.class, explicit, char.class); + } else if (expected == long.class && internal) { + return Cast.unboxFrom(char.class, long.class, explicit, char.class); + } else if (expected == float.class && internal) { + return Cast.unboxFrom(char.class, float.class, explicit, char.class); + } else if (expected == double.class && internal) { + return Cast.unboxFrom(char.class, double.class, explicit, char.class); } - } else if (actual.clazz == Integer.class) { - if (expected.clazz == byte.class && explicit && internal) { - return Cast.unboxFrom(definition.intType, definition.byteType, true, definition.intType); - } else if (expected.clazz == short.class && explicit && internal) { - return Cast.unboxFrom(definition.intType, definition.shortType, true, definition.intType); - } else if (expected.clazz == char.class && explicit && internal) { - return Cast.unboxFrom(definition.intType, definition.charType, true, definition.intType); - } else if (expected.clazz == int.class && internal) { - return Cast.unboxFrom(definition.intType, definition.intType, explicit, definition.intType); - } else if (expected.clazz == long.class && internal) { - return Cast.unboxFrom(definition.intType, definition.longType, explicit, definition.intType); - } else if (expected.clazz == float.class && internal) { - return Cast.unboxFrom(definition.intType, definition.floatType, explicit, definition.intType); - } else if (expected.clazz == double.class && internal) { - return Cast.unboxFrom(definition.intType, definition.doubleType, explicit, definition.intType); + } else if (actual == Integer.class) { + if (expected == byte.class && explicit && internal) { + return Cast.unboxFrom(int.class, byte.class, true, int.class); + } else if (expected == short.class && explicit && internal) { + return Cast.unboxFrom(int.class, short.class, true, int.class); + } else if (expected == char.class && explicit && internal) { + return Cast.unboxFrom(int.class, char.class, true, int.class); + } else if (expected == int.class && internal) { + return Cast.unboxFrom(int.class, int.class, explicit, int.class); + } else if (expected == long.class && internal) { + return Cast.unboxFrom(int.class, long.class, explicit, int.class); + } else if (expected == float.class && internal) { + return Cast.unboxFrom(int.class, float.class, explicit, int.class); + } else if (expected == double.class && internal) { + return Cast.unboxFrom(int.class, double.class, explicit, int.class); } - } else if (actual.clazz == Long.class) { - if (expected.clazz == byte.class && explicit && internal) { - return Cast.unboxFrom(definition.longType, definition.byteType, true, definition.longType); - } else if (expected.clazz == short.class && explicit && internal) { - return Cast.unboxFrom(definition.longType, definition.shortType, true, definition.longType); - } else if (expected.clazz == char.class && 
explicit && internal) { - return Cast.unboxFrom(definition.longType, definition.charType, true, definition.longType); - } else if (expected.clazz == int.class && explicit && internal) { - return Cast.unboxFrom(definition.longType, definition.intType, true, definition.longType); - } else if (expected.clazz == long.class && internal) { - return Cast.unboxFrom(definition.longType, definition.longType, explicit, definition.longType); - } else if (expected.clazz == float.class && internal) { - return Cast.unboxFrom(definition.longType, definition.floatType, explicit, definition.longType); - } else if (expected.clazz == double.class && internal) { - return Cast.unboxFrom(definition.longType, definition.doubleType, explicit, definition.longType); + } else if (actual == Long.class) { + if (expected == byte.class && explicit && internal) { + return Cast.unboxFrom(long.class, byte.class, true, long.class); + } else if (expected == short.class && explicit && internal) { + return Cast.unboxFrom(long.class, short.class, true, long.class); + } else if (expected == char.class && explicit && internal) { + return Cast.unboxFrom(long.class, char.class, true, long.class); + } else if (expected == int.class && explicit && internal) { + return Cast.unboxFrom(long.class, int.class, true, long.class); + } else if (expected == long.class && internal) { + return Cast.unboxFrom(long.class, long.class, explicit, long.class); + } else if (expected == float.class && internal) { + return Cast.unboxFrom(long.class, float.class, explicit, long.class); + } else if (expected == double.class && internal) { + return Cast.unboxFrom(long.class, double.class, explicit, long.class); } - } else if (actual.clazz == Float.class) { - if (expected.clazz == byte.class && explicit && internal) { - return Cast.unboxFrom(definition.floatType, definition.byteType, true, definition.floatType); - } else if (expected.clazz == short.class && explicit && internal) { - return Cast.unboxFrom(definition.floatType, definition.shortType, true, definition.floatType); - } else if (expected.clazz == char.class && explicit && internal) { - return Cast.unboxFrom(definition.floatType, definition.charType, true, definition.floatType); - } else if (expected.clazz == int.class && explicit && internal) { - return Cast.unboxFrom(definition.floatType, definition.intType, true, definition.floatType); - } else if (expected.clazz == long.class && explicit && internal) { - return Cast.unboxFrom(definition.floatType, definition.longType, true, definition.floatType); - } else if (expected.clazz == float.class && internal) { - return Cast.unboxFrom(definition.floatType, definition.floatType, explicit, definition.floatType); - } else if (expected.clazz == double.class && internal) { - return Cast.unboxFrom(definition.floatType, definition.doubleType, explicit, definition.floatType); + } else if (actual == Float.class) { + if (expected == byte.class && explicit && internal) { + return Cast.unboxFrom(float.class, byte.class, true, float.class); + } else if (expected == short.class && explicit && internal) { + return Cast.unboxFrom(float.class, short.class, true, float.class); + } else if (expected == char.class && explicit && internal) { + return Cast.unboxFrom(float.class, char.class, true, float.class); + } else if (expected == int.class && explicit && internal) { + return Cast.unboxFrom(float.class, int.class, true, float.class); + } else if (expected == long.class && explicit && internal) { + return Cast.unboxFrom(float.class, long.class, true, float.class); + } 
else if (expected == float.class && internal) { + return Cast.unboxFrom(float.class, float.class, explicit, float.class); + } else if (expected == double.class && internal) { + return Cast.unboxFrom(float.class, double.class, explicit, float.class); } - } else if (actual.clazz == Double.class) { - if (expected.clazz == byte.class && explicit && internal) { - return Cast.unboxFrom(definition.doubleType, definition.byteType, true, definition.doubleType); - } else if (expected.clazz == short.class && explicit && internal) { - return Cast.unboxFrom(definition.doubleType, definition.shortType, true, definition.doubleType); - } else if (expected.clazz == char.class && explicit && internal) { - return Cast.unboxFrom(definition.doubleType, definition.charType, true, definition.doubleType); - } else if (expected.clazz == int.class && explicit && internal) { - return Cast.unboxFrom(definition.doubleType, definition.intType, true, definition.doubleType); - } else if (expected.clazz == long.class && explicit && internal) { - return Cast.unboxFrom(definition.doubleType, definition.longType, true, definition.doubleType); - } else if (expected.clazz == float.class && explicit && internal) { - return Cast.unboxFrom(definition.doubleType, definition.floatType, true, definition.doubleType); - } else if (expected.clazz == double.class && internal) { - return Cast.unboxFrom(definition.doubleType, definition.doubleType, explicit, definition.doubleType); + } else if (actual == Double.class) { + if (expected == byte.class && explicit && internal) { + return Cast.unboxFrom(double.class, byte.class, true, double.class); + } else if (expected == short.class && explicit && internal) { + return Cast.unboxFrom(double.class, short.class, true, double.class); + } else if (expected == char.class && explicit && internal) { + return Cast.unboxFrom(double.class, char.class, true, double.class); + } else if (expected == int.class && explicit && internal) { + return Cast.unboxFrom(double.class, int.class, true, double.class); + } else if (expected == long.class && explicit && internal) { + return Cast.unboxFrom(double.class, long.class, true, double.class); + } else if (expected == float.class && explicit && internal) { + return Cast.unboxFrom(double.class, float.class, true, double.class); + } else if (expected == double.class && internal) { + return Cast.unboxFrom(double.class, double.class, explicit, double.class); } } - if ( actual.dynamic || - (actual.clazz != void.class && expected.dynamic) || - expected.clazz.isAssignableFrom(actual.clazz) || - (actual.clazz.isAssignableFrom(expected.clazz) && explicit)) { + if ( actual == def.class || + (actual != void.class && expected == def.class) || + expected.isAssignableFrom(actual) || + (actual.isAssignableFrom(expected) && explicit)) { return Cast.standard(actual, expected, explicit); } else { - throw location.createError(new ClassCastException("Cannot cast from [" + actual.name + "] to [" + expected.name + "].")); + throw location.createError(new ClassCastException( + "Cannot cast from [" + Definition.ClassToName(actual) + "] to [" + Definition.ClassToName(expected) + "].")); } } - public Object constCast(Location location, final Object constant, final Cast cast) { - Class fsort = cast.from.clazz; - Class tsort = cast.to.clazz; + public static Object constCast(Location location, Object constant, Cast cast) { + Class fsort = cast.from; + Class tsort = cast.to; if (fsort == tsort) { return constant; @@ -485,7 +480,7 @@ public Object constCast(Location location, final Object 
constant, final Cast cas } else if (fsort == char.class && tsort == String.class) { return Utility.charToString((char)constant); } else if (fsort.isPrimitive() && fsort != boolean.class && tsort.isPrimitive() && tsort != boolean.class) { - final Number number; + Number number; if (fsort == char.class) { number = (int)(char)constant; @@ -502,232 +497,209 @@ public Object constCast(Location location, final Object constant, final Cast cas else if (tsort == double.class) return number.doubleValue(); else { throw location.createError(new IllegalStateException("Cannot cast from " + - "[" + cast.from.clazz.getCanonicalName() + "] to [" + cast.to.clazz.getCanonicalName() + "].")); + "[" + cast.from.getCanonicalName() + "] to [" + cast.to.getCanonicalName() + "].")); } } else { throw location.createError(new IllegalStateException("Cannot cast from " + - "[" + cast.from.clazz.getCanonicalName() + "] to [" + cast.to.clazz.getCanonicalName() + "].")); + "[" + cast.from.getCanonicalName() + "] to [" + cast.to.getCanonicalName() + "].")); } } - public Type promoteNumeric(Type from, boolean decimal) { - Class sort = from.clazz; - - if (from.dynamic) { - return definition.DefType; - } else if ((sort == double.class) && decimal) { - return definition.doubleType; - } else if ((sort == float.class) && decimal) { - return definition.floatType; - } else if (sort == long.class) { - return definition.longType; - } else if (sort == int.class || sort == char.class || sort == short.class || sort == byte.class) { - return definition.intType; + public static Class promoteNumeric(Class from, boolean decimal) { + if (from == def.class || from == double.class && decimal || from == float.class && decimal || from == long.class) { + return from; + } else if (from == int.class || from == char.class || from == short.class || from == byte.class) { + return int.class; } return null; } - public Type promoteNumeric(Type from0, Type from1, boolean decimal) { - Class sort0 = from0.clazz; - Class sort1 = from1.clazz; - - if (from0.dynamic || from1.dynamic) { - return definition.DefType; + public static Class promoteNumeric(Class from0, Class from1, boolean decimal) { + if (from0 == def.class || from1 == def.class) { + return def.class; } if (decimal) { - if (sort0 == double.class || sort1 == double.class) { - return definition.doubleType; - } else if (sort0 == float.class || sort1 == float.class) { - return definition.floatType; + if (from0 == double.class || from1 == double.class) { + return double.class; + } else if (from0 == float.class || from1 == float.class) { + return float.class; } } - if (sort0 == long.class || sort1 == long.class) { - return definition.longType; - } else if (sort0 == int.class || sort1 == int.class || - sort0 == char.class || sort1 == char.class || - sort0 == short.class || sort1 == short.class || - sort0 == byte.class || sort1 == byte.class) { - return definition.intType; + if (from0 == long.class || from1 == long.class) { + return long.class; + } else if (from0 == int.class || from1 == int.class || + from0 == char.class || from1 == char.class || + from0 == short.class || from1 == short.class || + from0 == byte.class || from1 == byte.class) { + return int.class; } return null; } - public Type promoteAdd(Type from0, Type from1) { - Class sort0 = from0.clazz; - Class sort1 = from1.clazz; - - if (sort0 == String.class || sort1 == String.class) { - return definition.StringType; + public static Class promoteAdd(Class from0, Class from1) { + if (from0 == String.class || from1 == String.class) { + return 
String.class; } return promoteNumeric(from0, from1, true); } - public Type promoteXor(Type from0, Type from1) { - Class sort0 = from0.clazz; - Class sort1 = from1.clazz; - - if (from0.dynamic || from1.dynamic) { - return definition.DefType; + public static Class promoteXor(Class from0, Class from1) { + if (from0 == def.class || from1 == def.class) { + return def.class; } - if (sort0 == boolean.class || sort1 == boolean.class) { - return definition.booleanType; + if (from0 == boolean.class || from1 == boolean.class) { + return boolean.class; } return promoteNumeric(from0, from1, false); } - public Type promoteEquality(Type from0, Type from1) { - Class sort0 = from0.clazz; - Class sort1 = from1.clazz; - - if (from0.dynamic || from1.dynamic) { - return definition.DefType; + public static Class promoteEquality(Class from0, Class from1) { + if (from0 == def.class || from1 == def.class) { + return def.class; } - if (sort0.isPrimitive() && sort1.isPrimitive()) { - if (sort0 == boolean.class && sort1 == boolean.class) { - return definition.booleanType; + if (from0.isPrimitive() && from1.isPrimitive()) { + if (from0 == boolean.class && from1 == boolean.class) { + return boolean.class; } return promoteNumeric(from0, from1, true); } - return definition.ObjectType; + return Object.class; } - public Type promoteConditional(Type from0, Type from1, Object const0, Object const1) { - if (from0.equals(from1)) { + public static Class promoteConditional(Class from0, Class from1, Object const0, Object const1) { + if (from0 == from1) { return from0; } - Class sort0 = from0.clazz; - Class sort1 = from1.clazz; - - if (from0.dynamic || from1.dynamic) { - return definition.DefType; + if (from0 == def.class || from1 == def.class) { + return def.class; } - if (sort0.isPrimitive() && sort1.isPrimitive()) { - if (sort0 == boolean.class && sort1 == boolean.class) { - return definition.booleanType; + if (from0.isPrimitive() && from1.isPrimitive()) { + if (from0 == boolean.class && from1 == boolean.class) { + return boolean.class; } - if (sort0 == double.class || sort1 == double.class) { - return definition.doubleType; - } else if (sort0 == float.class || sort1 == float.class) { - return definition.floatType; - } else if (sort0 == long.class || sort1 == long.class) { - return definition.longType; + if (from0 == double.class || from1 == double.class) { + return double.class; + } else if (from0 == float.class || from1 == float.class) { + return float.class; + } else if (from0 == long.class || from1 == long.class) { + return long.class; } else { - if (sort0 == byte.class) { - if (sort1 == byte.class) { - return definition.byteType; - } else if (sort1 == short.class) { + if (from0 == byte.class) { + if (from1 == byte.class) { + return byte.class; + } else if (from1 == short.class) { if (const1 != null) { final short constant = (short)const1; if (constant <= Byte.MAX_VALUE && constant >= Byte.MIN_VALUE) { - return definition.byteType; + return byte.class; } } - return definition.shortType; - } else if (sort1 == char.class) { - return definition.intType; - } else if (sort1 == int.class) { + return short.class; + } else if (from1 == char.class) { + return int.class; + } else if (from1 == int.class) { if (const1 != null) { final int constant = (int)const1; if (constant <= Byte.MAX_VALUE && constant >= Byte.MIN_VALUE) { - return definition.byteType; + return byte.class; } } - return definition.intType; + return int.class; } - } else if (sort0 == short.class) { - if (sort1 == byte.class) { + } else if (from0 == short.class) { + if 
(from1 == byte.class) { if (const0 != null) { final short constant = (short)const0; if (constant <= Byte.MAX_VALUE && constant >= Byte.MIN_VALUE) { - return definition.byteType; + return byte.class; } } - return definition.shortType; - } else if (sort1 == short.class) { - return definition.shortType; - } else if (sort1 == char.class) { - return definition.intType; - } else if (sort1 == int.class) { + return short.class; + } else if (from1 == short.class) { + return short.class; + } else if (from1 == char.class) { + return int.class; + } else if (from1 == int.class) { if (const1 != null) { final int constant = (int)const1; if (constant <= Short.MAX_VALUE && constant >= Short.MIN_VALUE) { - return definition.shortType; + return short.class; } } - return definition.intType; + return int.class; } - } else if (sort0 == char.class) { - if (sort1 == byte.class) { - return definition.intType; - } else if (sort1 == short.class) { - return definition.intType; - } else if (sort1 == char.class) { - return definition.charType; - } else if (sort1 == int.class) { + } else if (from0 == char.class) { + if (from1 == byte.class) { + return int.class; + } else if (from1 == short.class) { + return int.class; + } else if (from1 == char.class) { + return char.class; + } else if (from1 == int.class) { if (const1 != null) { final int constant = (int)const1; if (constant <= Character.MAX_VALUE && constant >= Character.MIN_VALUE) { - return definition.byteType; + return byte.class; } } - return definition.intType; + return int.class; } - } else if (sort0 == int.class) { - if (sort1 == byte.class) { + } else if (from0 == int.class) { + if (from1 == byte.class) { if (const0 != null) { final int constant = (int)const0; if (constant <= Byte.MAX_VALUE && constant >= Byte.MIN_VALUE) { - return definition.byteType; + return byte.class; } } - return definition.intType; - } else if (sort1 == short.class) { + return int.class; + } else if (from1 == short.class) { if (const0 != null) { final int constant = (int)const0; if (constant <= Short.MAX_VALUE && constant >= Short.MIN_VALUE) { - return definition.byteType; + return byte.class; } } - return definition.intType; - } else if (sort1 == char.class) { + return int.class; + } else if (from1 == char.class) { if (const0 != null) { final int constant = (int)const0; if (constant <= Character.MAX_VALUE && constant >= Character.MIN_VALUE) { - return definition.byteType; + return byte.class; } } - return definition.intType; - } else if (sort1 == int.class) { - return definition.intType; + return int.class; + } else if (from1 == int.class) { + return int.class; } } } @@ -737,6 +709,10 @@ public Type promoteConditional(Type from0, Type from1, Object const0, Object con // TODO: to calculate the highest upper bound for the two types and return that. // TODO: However, for now we just return objectType that may require an extra cast. 
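
The TODO above leaves the common-supertype computation unimplemented. Purely for illustration (not part of the patch, and assuming concrete reference types rather than primitives or interfaces), a least-upper-bound walk could look like:

    static Class<?> upperBound(Class<?> a, Class<?> b) {
        Class<?> current = a;
        // climb a's superclass chain until it can also hold b; reaches Object in the worst case
        while (!current.isAssignableFrom(b)) {
            current = current.getSuperclass();
        }
        return current;
    }

Until something like this exists, the method falls back to Object, as the return just below shows, which may cost callers an extra cast.
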
- return definition.ObjectType; + return Object.class; + } + + private AnalyzerCaster() { + } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java index 7729c5319ea81..36c072570ec14 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java @@ -27,6 +27,7 @@ import java.lang.invoke.MethodType; import java.lang.reflect.Modifier; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -76,6 +77,13 @@ public final class Definition { public final Type ArrayListType; public final Type HashMapType; + /** Marker class for def type to be used during type analysis. */ + public static final class def { + private def() { + + } + } + public static final class Type { public final String name; public final int dimensions; @@ -365,40 +373,41 @@ public Method getFunctionalMethod() { } public static class Cast { + /** Create a standard cast with no boxing/unboxing. */ - public static Cast standard(Type from, Type to, boolean explicit) { + public static Cast standard(Class from, Class to, boolean explicit) { return new Cast(from, to, explicit, null, null, null, null); } /** Create a cast where the from type will be unboxed, and then the cast will be performed. */ - public static Cast unboxFrom(Type from, Type to, boolean explicit, Type unboxFrom) { + public static Cast unboxFrom(Class from, Class to, boolean explicit, Class unboxFrom) { return new Cast(from, to, explicit, unboxFrom, null, null, null); } /** Create a cast where the to type will be unboxed, and then the cast will be performed. */ - public static Cast unboxTo(Type from, Type to, boolean explicit, Type unboxTo) { + public static Cast unboxTo(Class from, Class to, boolean explicit, Class unboxTo) { return new Cast(from, to, explicit, null, unboxTo, null, null); } /** Create a cast where the from type will be boxed, and then the cast will be performed. */ - public static Cast boxFrom(Type from, Type to, boolean explicit, Type boxFrom) { + public static Cast boxFrom(Class from, Class to, boolean explicit, Class boxFrom) { return new Cast(from, to, explicit, null, null, boxFrom, null); } /** Create a cast where the to type will be boxed, and then the cast will be performed. 
 */
-        public static Cast boxTo(Type from, Type to, boolean explicit, Type boxTo) {
+        public static Cast boxTo(Class from, Class to, boolean explicit, Class boxTo) {
             return new Cast(from, to, explicit, null, null, null, boxTo);
         }
 
-        public final Type from;
-        public final Type to;
+        public final Class from;
+        public final Class to;
         public final boolean explicit;
-        public final Type unboxFrom;
-        public final Type unboxTo;
-        public final Type boxFrom;
-        public final Type boxTo;
+        public final Class unboxFrom;
+        public final Class unboxTo;
+        public final Class boxFrom;
+        public final Class boxTo;
 
-        private Cast(Type from, Type to, boolean explicit, Type unboxFrom, Type unboxTo, Type boxFrom, Type boxTo) {
+        private Cast(Class from, Class to, boolean explicit, Class unboxFrom, Class unboxTo, Class boxFrom, Class boxTo) {
             this.from = from;
             this.to = to;
             this.explicit = explicit;
@@ -499,6 +508,124 @@ public static boolean isConstantType(Type constant) {
                constant.clazz == String.class;
     }
 
+    public static Class ObjectClassTodefClass(Class clazz) {
+        if (clazz.isArray()) {
+            Class component = clazz.getComponentType();
+            int dimensions = 1;
+
+            while (component.isArray()) {
+                component = component.getComponentType();
+                ++dimensions;
+            }
+
+            if (component == Object.class) {
+                char[] braces = new char[dimensions];
+                Arrays.fill(braces, '[');
+
+                String descriptor = new String(braces) + org.objectweb.asm.Type.getType(def.class).getDescriptor();
+                org.objectweb.asm.Type type = org.objectweb.asm.Type.getType(descriptor);
+
+                try {
+                    return Class.forName(type.getInternalName().replace('/', '.'));
+                } catch (ClassNotFoundException exception) {
+                    throw new IllegalStateException("internal error", exception);
+                }
+            }
+        } else if (clazz == Object.class) {
+            return def.class;
+        }
+
+        return clazz;
+    }
+
+    public static Class defClassToObjectClass(Class clazz) {
+        if (clazz.isArray()) {
+            Class component = clazz.getComponentType();
+            int dimensions = 1;
+
+            while (component.isArray()) {
+                component = component.getComponentType();
+                ++dimensions;
+            }
+
+            if (component == def.class) {
+                char[] braces = new char[dimensions];
+                Arrays.fill(braces, '[');
+
+                String descriptor = new String(braces) + org.objectweb.asm.Type.getType(Object.class).getDescriptor();
+                org.objectweb.asm.Type type = org.objectweb.asm.Type.getType(descriptor);
+
+                try {
+                    return Class.forName(type.getInternalName().replace('/', '.'));
+                } catch (ClassNotFoundException exception) {
+                    throw new IllegalStateException("internal error", exception);
+                }
+            }
+        } else if (clazz == def.class) {
+            return Object.class;
+        }
+
+        return clazz;
+    }
+
+    public static String ClassToName(Class clazz) {
+        if (clazz.isArray()) {
+            Class component = clazz.getComponentType();
+            int dimensions = 1;
+
+            while (component.isArray()) {
+                component = component.getComponentType();
+                ++dimensions;
+            }
+
+            if (component == def.class) {
+                StringBuilder builder = new StringBuilder("def");
+
+                for (int dimension = 0; dimension < dimensions; dimension++) {
+                    builder.append("[]");
+                }
+
+                return builder.toString();
+            }
+        } else if (clazz == def.class) {
+            return "def";
+        }
+
+        return clazz.getCanonicalName().replace('$', '.');
+    }
+
+    public Type ClassToType(Class clazz) {
+        if (clazz == null) {
+            return null;
+        } else if (clazz.isArray()) {
+            Class component = clazz.getComponentType();
+            int dimensions = 1;
+
+            while (component.isArray()) {
+                component = component.getComponentType();
+                ++dimensions;
+            }
+
+            if (component == def.class) {
+                return getType(structsMap.get("def"), dimensions);
+            } else {
+                return getType(runtimeMap.get(component).struct, dimensions);
+            }
+        } else if (clazz == def.class) {
+            return getType(structsMap.get("def"), 0);
+        }
+
+        return getType(structsMap.get(ClassToName(clazz)), 0);
+    }
+
+    public static Class TypeToClass(Type type) {
+        if (type.dynamic) {
+            return ObjectClassTodefClass(type.clazz);
+        }
+
+        return type.clazz;
+    }
+
     public RuntimeClass getRuntimeClass(Class clazz) {
         return runtimeMap.get(clazz);
     }
@@ -536,8 +663,6 @@ private static String buildFieldCacheKey(String structName, String fieldName, St
     private final Map structsMap;
     private final Map simpleTypesMap;
 
-    public AnalyzerCaster caster;
-
     public Definition(List whitelists) {
         structsMap = new HashMap<>();
         simpleTypesMap = new HashMap<>();
@@ -719,8 +844,6 @@ public Definition(List whitelists) {
         IteratorType = getType("Iterator");
         ArrayListType = getType("ArrayList");
         HashMapType = getType("HashMap");
-
-        caster = new AnalyzerCaster(this);
     }
 
     private void addStruct(ClassLoader whitelistClassLoader, Whitelist.Struct whitelistStruct) {
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java
index b0c15abbfb0d5..7925856656e15 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java
@@ -20,15 +20,17 @@ package org.elasticsearch.painless;
 
 import org.elasticsearch.painless.Definition.Cast;
-import org.elasticsearch.painless.Definition.Type;
+import org.elasticsearch.painless.Definition.def;
 import org.objectweb.asm.ClassVisitor;
 import org.objectweb.asm.Label;
 import org.objectweb.asm.Opcodes;
+import org.objectweb.asm.Type;
 import org.objectweb.asm.commons.GeneratorAdapter;
 import org.objectweb.asm.commons.Method;
 
 import java.util.ArrayDeque;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.BitSet;
 import java.util.Deque;
 import java.util.List;
@@ -128,68 +130,68 @@ public void writeLoopCounter(int slot, int count, Location location) {
         mark(end);
     }
 
-    public void writeCast(final Cast cast) {
+    public void writeCast(Cast cast) {
         if (cast != null) {
-            if (cast.from.clazz == char.class && cast.to.clazz == String.class) {
+            if (cast.from == char.class && cast.to == String.class) {
                 invokeStatic(UTILITY_TYPE, CHAR_TO_STRING);
-            } else if (cast.from.clazz == String.class && cast.to.clazz == char.class) {
+            } else if (cast.from == String.class && cast.to == char.class) {
                 invokeStatic(UTILITY_TYPE, STRING_TO_CHAR);
             } else if (cast.unboxFrom != null) {
-                unbox(cast.unboxFrom.type);
+                unbox(getType(cast.unboxFrom));
                 writeCast(cast.from, cast.to);
             } else if (cast.unboxTo != null) {
-                if (cast.from.dynamic) {
+                if (cast.from == def.class) {
                     if (cast.explicit) {
-                        if (cast.to.clazz == Boolean.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_BOOLEAN);
-                        else if (cast.to.clazz == Byte.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_BYTE_EXPLICIT);
-                        else if (cast.to.clazz == Short.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_SHORT_EXPLICIT);
-                        else if (cast.to.clazz == Character.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_CHAR_EXPLICIT);
-                        else if (cast.to.clazz == Integer.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_INT_EXPLICIT);
-                        else if (cast.to.clazz == Long.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_LONG_EXPLICIT);
-                        else if (cast.to.clazz == Float.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_FLOAT_EXPLICIT);
-                        else if (cast.to.clazz == Double.class)
PLACEHOLDER-UNUSED
invokeStatic(DEF_UTIL_TYPE, DEF_TO_DOUBLE_EXPLICIT); + if (cast.to == Boolean.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_BOOLEAN); + else if (cast.to == Byte.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_BYTE_EXPLICIT); + else if (cast.to == Short.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_SHORT_EXPLICIT); + else if (cast.to == Character.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_CHAR_EXPLICIT); + else if (cast.to == Integer.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_INT_EXPLICIT); + else if (cast.to == Long.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_LONG_EXPLICIT); + else if (cast.to == Float.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_FLOAT_EXPLICIT); + else if (cast.to == Double.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_DOUBLE_EXPLICIT); else { throw new IllegalStateException("Illegal tree structure."); } } else { - if (cast.to.clazz == Boolean.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_BOOLEAN); - else if (cast.to.clazz == Byte.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_BYTE_IMPLICIT); - else if (cast.to.clazz == Short.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_SHORT_IMPLICIT); - else if (cast.to.clazz == Character.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_CHAR_IMPLICIT); - else if (cast.to.clazz == Integer.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_INT_IMPLICIT); - else if (cast.to.clazz == Long.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_LONG_IMPLICIT); - else if (cast.to.clazz == Float.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_FLOAT_IMPLICIT); - else if (cast.to.clazz == Double.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_DOUBLE_IMPLICIT); + if (cast.to == Boolean.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_BOOLEAN); + else if (cast.to == Byte.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_BYTE_IMPLICIT); + else if (cast.to == Short.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_SHORT_IMPLICIT); + else if (cast.to == Character.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_CHAR_IMPLICIT); + else if (cast.to == Integer.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_INT_IMPLICIT); + else if (cast.to == Long.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_LONG_IMPLICIT); + else if (cast.to == Float.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_FLOAT_IMPLICIT); + else if (cast.to == Double.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_DOUBLE_IMPLICIT); else { throw new IllegalStateException("Illegal tree structure."); } } } else { writeCast(cast.from, cast.to); - unbox(cast.unboxTo.type); + unbox(getType(cast.unboxTo)); } } else if (cast.boxFrom != null) { - box(cast.boxFrom.type); + box(getType(cast.boxFrom)); writeCast(cast.from, cast.to); } else if (cast.boxTo != null) { writeCast(cast.from, cast.to); - box(cast.boxTo.type); + box(getType(cast.boxTo)); } else { writeCast(cast.from, cast.to); } } } - private void writeCast(final Type from, final Type to) { + private void writeCast(Class from, Class to) { if (from.equals(to)) { return; } - if (from.clazz != boolean.class && from.clazz.isPrimitive() && to.clazz != boolean.class && to.clazz.isPrimitive()) { - cast(from.type, to.type); + if (from != boolean.class && from.isPrimitive() && to != boolean.class && to.isPrimitive()) { + cast(getType(from), getType(to)); } else { - if (!to.clazz.isAssignableFrom(from.clazz)) { - checkCast(to.type); + if (!to.isAssignableFrom(from)) { + checkCast(getType(to)); } } } @@ -202,6 +204,29 @@ public void box(org.objectweb.asm.Type type) { valueOf(type); } + public static Type getType(Class clazz) { + if (clazz.isArray()) { + Class component = clazz.getComponentType(); + int dimensions = 1; + + while (component.isArray()) { + component = 
component.getComponentType(); + ++dimensions; + } + + if (component == def.class) { + char[] braces = new char[dimensions]; + Arrays.fill(braces, '['); + + return Type.getType(new String(braces) + Type.getType(Object.class).getDescriptor()); + } + } else if (clazz == def.class) { + return Type.getType(Object.class); + } + + return Type.getType(clazz); + } + public void writeBranch(final Label tru, final Label fals) { if (tru != null) { visitJumpInsn(Opcodes.IFNE, tru); @@ -227,7 +252,7 @@ public int writeNewStrings() { } } - public void writeAppendStrings(final Type type) { + public void writeAppendStrings(final Definition.Type type) { if (INDY_STRING_CONCAT_BOOTSTRAP_HANDLE != null) { // Java 9+: record type information stringConcatArgs.peek().add(type.type); @@ -267,7 +292,7 @@ public void writeToStrings() { } /** Writes a dynamic binary instruction: returnType, lhs, and rhs can be different */ - public void writeDynamicBinaryInstruction(Location location, Type returnType, Type lhs, Type rhs, + public void writeDynamicBinaryInstruction(Location location, Definition.Type returnType, Definition.Type lhs, Definition.Type rhs, Operation operation, int flags) { org.objectweb.asm.Type methodType = org.objectweb.asm.Type.getMethodType(returnType.type, lhs.type, rhs.type); @@ -318,7 +343,7 @@ public void writeDynamicBinaryInstruction(Location location, Type returnType, Ty } /** Writes a static binary instruction */ - public void writeBinaryInstruction(Location location, Type type, Operation operation) { + public void writeBinaryInstruction(Location location, Definition.Type type, Operation operation) { if ((type.clazz == float.class || type.clazz == double.class) && (operation == Operation.LSH || operation == Operation.USH || operation == Operation.RSH || operation == Operation.BWAND || diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AExpression.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AExpression.java index 2ca0b265430f9..eaa13ea9a8b17 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AExpression.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AExpression.java @@ -119,7 +119,8 @@ public abstract class AExpression extends ANode { * @return The new child node for the parent node calling this method. */ AExpression cast(Locals locals) { - Cast cast = locals.getDefinition().caster.getLegalCast(location, actual, expected, explicit, internal); + Cast cast = + AnalyzerCaster.getLegalCast(location, Definition.TypeToClass(actual), Definition.TypeToClass(expected), explicit, internal); if (cast == null) { if (constant == null || this instanceof EConstant) { @@ -167,7 +168,7 @@ AExpression cast(Locals locals) { // from this node because the output data for the EConstant // will already be the same. 
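The MethodWriter.getType helper above lowers painless's def marker class to java.lang.Object in the emitted bytecode, including any number of array dimensions. A self-contained sketch of that descriptor construction (requires the ASM library on the classpath; the nested def class stands in for Definition.def):

import java.util.Arrays;
import org.objectweb.asm.Type;

final class AsmDefTypeSketch {
    static final class def {} // stand-in for org.elasticsearch.painless.Definition.def

    static Type asmType(Class<?> clazz) {
        if (clazz.isArray()) {
            Class<?> component = clazz.getComponentType();
            int dimensions = 1;
            while (component.isArray()) {
                component = component.getComponentType();
                ++dimensions;
            }
            if (component == def.class) {
                // def[][] erases to Object[][]: prepend one '[' per dimension
                // to Object's descriptor, yielding "[[Ljava/lang/Object;".
                char[] braces = new char[dimensions];
                Arrays.fill(braces, '[');
                return Type.getType(new String(braces) + Type.getType(Object.class).getDescriptor());
            }
        } else if (clazz == def.class) {
            return Type.getType(Object.class); // a bare def is just Object at runtime
        }
        return Type.getType(clazz);
    }

    public static void main(String[] args) {
        System.out.println(asmType(def[][].class).getDescriptor()); // [[Ljava/lang/Object;
    }
}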
- constant = locals.getDefinition().caster.constCast(location, constant, cast); + constant = AnalyzerCaster.constCast(location, constant, cast); EConstant econstant = new EConstant(location, constant); econstant.analyze(locals); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EAssignment.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EAssignment.java index 873f109e72d47..45ca4601e963d 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EAssignment.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EAssignment.java @@ -19,7 +19,10 @@ package org.elasticsearch.painless.node; + +import org.elasticsearch.painless.AnalyzerCaster; import org.elasticsearch.painless.DefBootstrap; +import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Definition.Cast; import org.elasticsearch.painless.Definition.Type; import org.elasticsearch.painless.Globals; @@ -139,33 +142,41 @@ private void analyzeCompound(Locals locals) { boolean shift = false; if (operation == Operation.MUL) { - promote = locals.getDefinition().caster.promoteNumeric(lhs.actual, rhs.actual, true); + promote = locals.getDefinition().ClassToType( + AnalyzerCaster.promoteNumeric(Definition.TypeToClass(lhs.actual), Definition.TypeToClass(rhs.actual), true)); } else if (operation == Operation.DIV) { - promote = locals.getDefinition().caster.promoteNumeric(lhs.actual, rhs.actual, true); + promote = locals.getDefinition().ClassToType( + AnalyzerCaster.promoteNumeric(Definition.TypeToClass(lhs.actual), Definition.TypeToClass(rhs.actual), true)); } else if (operation == Operation.REM) { - promote = locals.getDefinition().caster.promoteNumeric(lhs.actual, rhs.actual, true); + promote = locals.getDefinition().ClassToType( + AnalyzerCaster.promoteNumeric(Definition.TypeToClass(lhs.actual), Definition.TypeToClass(rhs.actual), true)); } else if (operation == Operation.ADD) { - promote = locals.getDefinition().caster.promoteAdd(lhs.actual, rhs.actual); + promote = locals.getDefinition().ClassToType( + AnalyzerCaster.promoteAdd(Definition.TypeToClass(lhs.actual), Definition.TypeToClass(rhs.actual))); } else if (operation == Operation.SUB) { - promote = locals.getDefinition().caster.promoteNumeric(lhs.actual, rhs.actual, true); + promote = locals.getDefinition().ClassToType( + AnalyzerCaster.promoteNumeric(Definition.TypeToClass(lhs.actual), Definition.TypeToClass(rhs.actual), true)); } else if (operation == Operation.LSH) { - promote = locals.getDefinition().caster.promoteNumeric(lhs.actual, false); - shiftDistance = locals.getDefinition().caster.promoteNumeric(rhs.actual, false); + promote = locals.getDefinition().ClassToType(AnalyzerCaster.promoteNumeric(Definition.TypeToClass(lhs.actual), false)); + shiftDistance = locals.getDefinition().ClassToType(AnalyzerCaster.promoteNumeric(Definition.TypeToClass(rhs.actual), false)); shift = true; } else if (operation == Operation.RSH) { - promote = locals.getDefinition().caster.promoteNumeric(lhs.actual, false); - shiftDistance = locals.getDefinition().caster.promoteNumeric(rhs.actual, false); + promote = locals.getDefinition().ClassToType(AnalyzerCaster.promoteNumeric(Definition.TypeToClass(lhs.actual), false)); + shiftDistance = locals.getDefinition().ClassToType(AnalyzerCaster.promoteNumeric(Definition.TypeToClass(rhs.actual), false)); shift = true; } else if (operation == Operation.USH) { - promote = locals.getDefinition().caster.promoteNumeric(lhs.actual, false); - 
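The constCast call above folds a cast over a compile-time constant so no runtime cast is emitted for EConstant children. A simplified, hand-rolled illustration of that folding (not the actual AnalyzerCaster logic):

final class ConstCastSketch {
    // Fold a compile-time numeric constant across a cast, the way the
    // analyzer re-wraps a casted EConstant before code generation.
    static Object constCast(Object constant, Class<?> to) {
        Number n = (Number) constant;
        if (to == int.class) return n.intValue();
        if (to == long.class) return n.longValue();
        if (to == double.class) return n.doubleValue();
        throw new IllegalStateException("unsupported constant cast to " + to);
    }

    public static void main(String[] args) {
        System.out.println(constCast(42, long.class));   // 42, already a long at compile time
        System.out.println(constCast(42, double.class)); // 42.0
    }
}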
shiftDistance = locals.getDefinition().caster.promoteNumeric(rhs.actual, false); + promote = locals.getDefinition().ClassToType(AnalyzerCaster.promoteNumeric(Definition.TypeToClass(lhs.actual), false)); + shiftDistance = locals.getDefinition().ClassToType(AnalyzerCaster.promoteNumeric(Definition.TypeToClass(rhs.actual), false)); shift = true; } else if (operation == Operation.BWAND) { - promote = locals.getDefinition().caster.promoteXor(lhs.actual, rhs.actual); + promote = locals.getDefinition().ClassToType( + AnalyzerCaster.promoteXor(Definition.TypeToClass(lhs.actual), Definition.TypeToClass(rhs.actual))); } else if (operation == Operation.XOR) { - promote = locals.getDefinition().caster.promoteXor(lhs.actual, rhs.actual); + promote = locals.getDefinition().ClassToType( + AnalyzerCaster.promoteXor(Definition.TypeToClass(lhs.actual), Definition.TypeToClass(rhs.actual))); } else if (operation == Operation.BWOR) { - promote = locals.getDefinition().caster.promoteXor(lhs.actual, rhs.actual); + promote = locals.getDefinition().ClassToType( + AnalyzerCaster.promoteXor(Definition.TypeToClass(lhs.actual), Definition.TypeToClass(rhs.actual))); } else { throw createError(new IllegalStateException("Illegal tree structure.")); } @@ -199,8 +210,8 @@ private void analyzeCompound(Locals locals) { rhs = rhs.cast(locals); - there = locals.getDefinition().caster.getLegalCast(location, lhs.actual, promote, false, false); - back = locals.getDefinition().caster.getLegalCast(location, promote, lhs.actual, true, false); + there = AnalyzerCaster.getLegalCast(location, Definition.TypeToClass(lhs.actual), Definition.TypeToClass(promote), false, false); + back = AnalyzerCaster.getLegalCast(location, Definition.TypeToClass(promote), Definition.TypeToClass(lhs.actual), true, false); this.statement = true; this.actual = read ? 
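The "there" and "back" casts computed above give compound assignments their Java semantics: promote the left side to the operation type, apply the operator, then cast back explicitly since the result may narrow. The same shape spelled out in plain Java:

final class CompoundAssignSketch {
    public static void main(String[] args) {
        byte b = 120;
        // b += 10 desugars to: b = (byte) ((int) b + 10);
        int promoted = b;          // "there": implicit widening to the promoted type
        promoted = promoted + 10;  // the binary operation runs at the promoted type
        b = (byte) promoted;       // "back": explicit cast, may overflow when narrowing
        System.out.println(b);     // -126, the same result javac produces for b += 10
    }
}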
lhs.actual : locals.getDefinition().voidType; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EBinary.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EBinary.java index df92d72a3c0c5..55c2145acd8cd 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EBinary.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EBinary.java @@ -19,6 +19,7 @@ package org.elasticsearch.painless.node; +import org.elasticsearch.painless.AnalyzerCaster; import org.elasticsearch.painless.DefBootstrap; import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Definition.Type; @@ -101,7 +102,8 @@ private void analyzeMul(Locals variables) { left.analyze(variables); right.analyze(variables); - promote = variables.getDefinition().caster.promoteNumeric(left.actual, right.actual, true); + promote = variables.getDefinition().ClassToType( + AnalyzerCaster.promoteNumeric(Definition.TypeToClass(left.actual), Definition.TypeToClass(right.actual), true)); if (promote == null) { throw createError(new ClassCastException("Cannot apply multiply [*] to types " + @@ -145,7 +147,8 @@ private void analyzeDiv(Locals variables) { left.analyze(variables); right.analyze(variables); - promote = variables.getDefinition().caster.promoteNumeric(left.actual, right.actual, true); + promote = variables.getDefinition().ClassToType( + AnalyzerCaster.promoteNumeric(Definition.TypeToClass(left.actual), Definition.TypeToClass(right.actual), true)); if (promote == null) { throw createError(new ClassCastException("Cannot apply divide [/] to types " + @@ -194,7 +197,8 @@ private void analyzeRem(Locals variables) { left.analyze(variables); right.analyze(variables); - promote = variables.getDefinition().caster.promoteNumeric(left.actual, right.actual, true); + promote = variables.getDefinition().ClassToType( + AnalyzerCaster.promoteNumeric(Definition.TypeToClass(left.actual), Definition.TypeToClass(right.actual), true)); if (promote == null) { throw createError(new ClassCastException("Cannot apply remainder [%] to types " + @@ -243,7 +247,8 @@ private void analyzeAdd(Locals variables) { left.analyze(variables); right.analyze(variables); - promote = variables.getDefinition().caster.promoteAdd(left.actual, right.actual); + promote = variables.getDefinition().ClassToType( + AnalyzerCaster.promoteAdd(Definition.TypeToClass(left.actual), Definition.TypeToClass(right.actual))); if (promote == null) { throw createError(new ClassCastException("Cannot apply add [+] to types " + @@ -303,7 +308,8 @@ private void analyzeSub(Locals variables) { left.analyze(variables); right.analyze(variables); - promote = variables.getDefinition().caster.promoteNumeric(left.actual, right.actual, true); + promote = variables.getDefinition().ClassToType( + AnalyzerCaster.promoteNumeric(Definition.TypeToClass(left.actual), Definition.TypeToClass(right.actual), true)); if (promote == null) { throw createError(new ClassCastException("Cannot apply subtract [-] to types " + @@ -362,8 +368,8 @@ private void analyzeLSH(Locals variables) { left.analyze(variables); right.analyze(variables); - Type lhspromote = variables.getDefinition().caster.promoteNumeric(left.actual, false); - Type rhspromote = variables.getDefinition().caster.promoteNumeric(right.actual, false); + Type lhspromote = variables.getDefinition().ClassToType(AnalyzerCaster.promoteNumeric(Definition.TypeToClass(left.actual), false)); + Type rhspromote = 
variables.getDefinition().ClassToType(AnalyzerCaster.promoteNumeric(Definition.TypeToClass(right.actual), false)); if (lhspromote == null || rhspromote == null) { throw createError(new ClassCastException("Cannot apply left shift [<<] to types " + @@ -411,8 +417,8 @@ private void analyzeRSH(Locals variables) { left.analyze(variables); right.analyze(variables); - Type lhspromote = variables.getDefinition().caster.promoteNumeric(left.actual, false); - Type rhspromote = variables.getDefinition().caster.promoteNumeric(right.actual, false); + Type lhspromote = variables.getDefinition().ClassToType(AnalyzerCaster.promoteNumeric(Definition.TypeToClass(left.actual), false)); + Type rhspromote = variables.getDefinition().ClassToType(AnalyzerCaster.promoteNumeric(Definition.TypeToClass(right.actual), false)); if (lhspromote == null || rhspromote == null) { throw createError(new ClassCastException("Cannot apply right shift [>>] to types " + @@ -460,8 +466,8 @@ private void analyzeUSH(Locals variables) { left.analyze(variables); right.analyze(variables); - Type lhspromote = variables.getDefinition().caster.promoteNumeric(left.actual, false); - Type rhspromote = variables.getDefinition().caster.promoteNumeric(right.actual, false); + Type lhspromote = variables.getDefinition().ClassToType(AnalyzerCaster.promoteNumeric(Definition.TypeToClass(left.actual), false)); + Type rhspromote = variables.getDefinition().ClassToType(AnalyzerCaster.promoteNumeric(Definition.TypeToClass(right.actual), false)); actual = promote = lhspromote; shiftDistance = rhspromote; @@ -509,7 +515,8 @@ private void analyzeBWAnd(Locals variables) { left.analyze(variables); right.analyze(variables); - promote = variables.getDefinition().caster.promoteNumeric(left.actual, right.actual, false); + promote = variables.getDefinition().ClassToType( + AnalyzerCaster.promoteNumeric(Definition.TypeToClass(left.actual), Definition.TypeToClass(right.actual), false)); if (promote == null) { throw createError(new ClassCastException("Cannot apply and [&] to types " + @@ -550,7 +557,8 @@ private void analyzeXor(Locals variables) { left.analyze(variables); right.analyze(variables); - promote = variables.getDefinition().caster.promoteXor(left.actual, right.actual); + promote = variables.getDefinition().ClassToType( + AnalyzerCaster.promoteXor(Definition.TypeToClass(left.actual), Definition.TypeToClass(right.actual))); if (promote == null) { throw createError(new ClassCastException("Cannot apply xor [^] to types " + @@ -592,7 +600,8 @@ private void analyzeBWOr(Locals variables) { left.analyze(variables); right.analyze(variables); - promote = variables.getDefinition().caster.promoteNumeric(left.actual, right.actual, false); + promote = variables.getDefinition().ClassToType( + AnalyzerCaster.promoteNumeric(Definition.TypeToClass(left.actual), Definition.TypeToClass(right.actual), false)); if (promote == null) { throw createError(new ClassCastException("Cannot apply or [|] to types " + diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECapturingFunctionRef.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECapturingFunctionRef.java index 564fcef8eef9f..e736b2779f932 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECapturingFunctionRef.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECapturingFunctionRef.java @@ -82,11 +82,12 @@ void analyze(Locals locals) { for (int i = 0; i < ref.interfaceMethod.arguments.size(); ++i) { Definition.Type 
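Each analyze method above asks promoteNumeric for a common arithmetic type, and the shift operators promote the value and the shift distance independently (hence the separate lhspromote and rhspromote). A compact sketch of the promotion ladder (simplified; the real rules also handle def and reject booleans):

import java.util.Arrays;
import java.util.List;

final class PromoteNumericSketch {
    // int < long < float < double; byte/short/char promote to at least int.
    static final List<Class<?>> LADDER = Arrays.asList(int.class, long.class, float.class, double.class);

    static Class<?> promote(Class<?> a, Class<?> b) {
        int ia = Math.max(LADDER.indexOf(a), 0); // anything below int maps to int
        int ib = Math.max(LADDER.indexOf(b), 0);
        return LADDER.get(Math.max(ia, ib));
    }

    public static void main(String[] args) {
        System.out.println(promote(byte.class, int.class));    // int
        System.out.println(promote(int.class, long.class));    // long
        System.out.println(promote(long.class, double.class)); // double
    }
}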
from = ref.interfaceMethod.arguments.get(i); Definition.Type to = ref.delegateMethod.arguments.get(i); - locals.getDefinition().caster.getLegalCast(location, from, to, false, true); + AnalyzerCaster.getLegalCast(location, Definition.TypeToClass(from), Definition.TypeToClass(to), false, true); } if (ref.interfaceMethod.rtn.equals(locals.getDefinition().voidType) == false) { - locals.getDefinition().caster.getLegalCast(location, ref.delegateMethod.rtn, ref.interfaceMethod.rtn, false, true); + AnalyzerCaster.getLegalCast(location, + Definition.TypeToClass(ref.delegateMethod.rtn), Definition.TypeToClass(ref.interfaceMethod.rtn), false, true); } } catch (IllegalArgumentException e) { throw createError(e); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECast.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECast.java index 5c2a149876139..42ec197c7f5f3 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECast.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECast.java @@ -19,6 +19,7 @@ package org.elasticsearch.painless.node; +import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Definition.Cast; import java.util.Objects; @@ -63,6 +64,6 @@ void write(MethodWriter writer, Globals globals) { @Override public String toString() { - return singleLineToString(cast.to, child); + return singleLineToString(Definition.ClassToName(cast.to), child); } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EComp.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EComp.java index 020ea48cd4c1b..a7bb57a1a35a4 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EComp.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EComp.java @@ -89,7 +89,8 @@ private void analyzeEq(Locals variables) { left.analyze(variables); right.analyze(variables); - promotedType = variables.getDefinition().caster.promoteEquality(left.actual, right.actual); + promotedType = variables.getDefinition().ClassToType( + AnalyzerCaster.promoteEquality(Definition.TypeToClass(left.actual), Definition.TypeToClass(right.actual))); if (promotedType == null) { throw createError(new ClassCastException("Cannot apply equals [==] to types " + @@ -140,7 +141,8 @@ private void analyzeEqR(Locals variables) { left.analyze(variables); right.analyze(variables); - promotedType = variables.getDefinition().caster.promoteEquality(left.actual, right.actual); + promotedType = variables.getDefinition().ClassToType( + AnalyzerCaster.promoteEquality(Definition.TypeToClass(left.actual), Definition.TypeToClass(right.actual))); if (promotedType == null) { throw createError(new ClassCastException("Cannot apply reference equals [===] to types " + @@ -182,7 +184,8 @@ private void analyzeNE(Locals variables) { left.analyze(variables); right.analyze(variables); - promotedType = variables.getDefinition().caster.promoteEquality(left.actual, right.actual); + promotedType = variables.getDefinition().ClassToType( + AnalyzerCaster.promoteEquality(Definition.TypeToClass(left.actual), Definition.TypeToClass(right.actual))); if (promotedType == null) { throw createError(new ClassCastException("Cannot apply not equals [!=] to types " + @@ -233,7 +236,8 @@ private void analyzeNER(Locals variables) { left.analyze(variables); right.analyze(variables); - promotedType = variables.getDefinition().caster.promoteEquality(left.actual, 
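The getLegalCast checks above only verify castability between the functional interface method and the delegate method, parameter by parameter plus the return type. A rough equivalent using plain reflection (hypothetical helper; reference types only, no primitive adaptation):

import java.lang.reflect.Method;

final class FunctionRefCheckSketch {
    // True when 'delegate' can stand in for the functional method 'iface':
    // parameters flow iface -> delegate, the return value flows back.
    static boolean compatible(Method iface, Method delegate) {
        Class<?>[] from = iface.getParameterTypes();
        Class<?>[] to = delegate.getParameterTypes();
        if (from.length != to.length) return false;
        for (int i = 0; i < from.length; ++i) {
            if (to[i].isAssignableFrom(from[i]) == false) return false;
        }
        return iface.getReturnType().isAssignableFrom(delegate.getReturnType());
    }

    public static void main(String[] args) throws Exception {
        Method apply = java.util.function.Function.class.getMethod("apply", Object.class);
        Method toStr = Object.class.getMethod("toString");
        System.out.println(compatible(apply, toStr)); // false: arity mismatch (1 vs 0 params)
    }
}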
right.actual);
+        promotedType = variables.getDefinition().ClassToType(
+                AnalyzerCaster.promoteEquality(Definition.TypeToClass(left.actual), Definition.TypeToClass(right.actual)));
 
         if (promotedType == null) {
             throw createError(new ClassCastException("Cannot apply reference not equals [!==] to types " +
@@ -275,7 +279,8 @@ private void analyzeGTE(Locals variables) {
         left.analyze(variables);
         right.analyze(variables);
 
-        promotedType = variables.getDefinition().caster.promoteNumeric(left.actual, right.actual, true);
+        promotedType = variables.getDefinition().ClassToType(
+                AnalyzerCaster.promoteNumeric(Definition.TypeToClass(left.actual), Definition.TypeToClass(right.actual), true));
 
         if (promotedType == null) {
             throw createError(new ClassCastException("Cannot apply greater than or equals [>=] to types " +
@@ -316,7 +321,8 @@ private void analyzeGT(Locals variables) {
         left.analyze(variables);
         right.analyze(variables);
 
-        promotedType = variables.getDefinition().caster.promoteNumeric(left.actual, right.actual, true);
+        promotedType = variables.getDefinition().ClassToType(
+                AnalyzerCaster.promoteNumeric(Definition.TypeToClass(left.actual), Definition.TypeToClass(right.actual), true));
 
         if (promotedType == null) {
             throw createError(new ClassCastException("Cannot apply greater than [>] to types " +
@@ -357,7 +363,8 @@ private void analyzeLTE(Locals variables) {
         left.analyze(variables);
         right.analyze(variables);
 
-        promotedType = variables.getDefinition().caster.promoteNumeric(left.actual, right.actual, true);
+        promotedType = variables.getDefinition().ClassToType(
+                AnalyzerCaster.promoteNumeric(Definition.TypeToClass(left.actual), Definition.TypeToClass(right.actual), true));
 
         if (promotedType == null) {
             throw createError(new ClassCastException("Cannot apply less than or equals [<=] to types " +
@@ -398,7 +405,8 @@ private void analyzeLT(Locals variables) {
         left.analyze(variables);
         right.analyze(variables);
 
-        promotedType = variables.getDefinition().caster.promoteNumeric(left.actual, right.actual, true);
+        promotedType = variables.getDefinition().ClassToType(
+                AnalyzerCaster.promoteNumeric(Definition.TypeToClass(left.actual), Definition.TypeToClass(right.actual), true));
 
         if (promotedType == null) {
             throw createError(new ClassCastException("Cannot apply less than [<] to types " +
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EConditional.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EConditional.java
index 571e57cad24db..30a3d0d773f23 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EConditional.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EConditional.java
@@ -79,7 +79,8 @@ void analyze(Locals locals) {
         right.analyze(locals);
 
         if (expected == null) {
-            final Type promote = locals.getDefinition().caster.promoteConditional(left.actual, right.actual, left.constant, right.constant);
+            Type promote = locals.getDefinition().ClassToType(AnalyzerCaster.promoteConditional(
+                Definition.TypeToClass(left.actual), Definition.TypeToClass(right.actual), left.constant, right.constant));
 
             left.expected = promote;
             right.expected = promote;
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EElvis.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EElvis.java
index e9816c524bf3b..6005a326fe92a 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EElvis.java
+++
b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EElvis.java @@ -20,6 +20,7 @@ package org.elasticsearch.painless.node; import org.elasticsearch.painless.AnalyzerCaster; +import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Definition.Type; import org.elasticsearch.painless.Globals; import org.elasticsearch.painless.Locals; @@ -81,7 +82,8 @@ void analyze(Locals locals) { } if (expected == null) { - final Type promote = locals.getDefinition().caster.promoteConditional(lhs.actual, rhs.actual, lhs.constant, rhs.constant); + Type promote = locals.getDefinition().ClassToType(AnalyzerCaster.promoteConditional( + Definition.TypeToClass(lhs.actual), Definition.TypeToClass(rhs.actual), lhs.constant, rhs.constant)); lhs.expected = promote; rhs.expected = promote; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EFunctionRef.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EFunctionRef.java index ffbb344f29cb9..13289809e49da 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EFunctionRef.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EFunctionRef.java @@ -82,11 +82,12 @@ void analyze(Locals locals) { for (int i = 0; i < interfaceMethod.arguments.size(); ++i) { Definition.Type from = interfaceMethod.arguments.get(i); Definition.Type to = delegateMethod.arguments.get(i); - locals.getDefinition().caster.getLegalCast(location, from, to, false, true); + AnalyzerCaster.getLegalCast(location, Definition.TypeToClass(from), Definition.TypeToClass(to), false, true); } if (interfaceMethod.rtn.equals(locals.getDefinition().voidType) == false) { - locals.getDefinition().caster.getLegalCast(location, delegateMethod.rtn, interfaceMethod.rtn, false, true); + AnalyzerCaster.getLegalCast( + location, Definition.TypeToClass(delegateMethod.rtn), Definition.TypeToClass(interfaceMethod.rtn), false, true); } } else { // whitelist lookup diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ELambda.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ELambda.java index 07de9138e7ca4..68950f5ea2a8b 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ELambda.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ELambda.java @@ -192,11 +192,12 @@ void analyze(Locals locals) { for (int i = 0; i < interfaceMethod.arguments.size(); ++i) { Type from = interfaceMethod.arguments.get(i); Type to = desugared.parameters.get(i + captures.size()).type; - locals.getDefinition().caster.getLegalCast(location, from, to, false, true); + AnalyzerCaster.getLegalCast(location, Definition.TypeToClass(from), Definition.TypeToClass(to), false, true); } if (interfaceMethod.rtn.equals(locals.getDefinition().voidType) == false) { - locals.getDefinition().caster.getLegalCast(location, desugared.rtnType, interfaceMethod.rtn, false, true); + AnalyzerCaster.getLegalCast( + location, Definition.TypeToClass(desugared.rtnType), Definition.TypeToClass(interfaceMethod.rtn), false, true); } actual = expected; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EUnary.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EUnary.java index e9971b538f5af..aa81407819eb9 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EUnary.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EUnary.java @@ 
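promoteConditional above picks the common promoted type of the two branches when no target type is expected, which matches Java's own conditional typing. A plain-Java demonstration:

final class ConditionalPromotionSketch {
    public static void main(String[] args) {
        boolean flag = true;
        // Java applies the same branch promotion the painless analyzer
        // computes via promoteConditional: int and double promote to double.
        double result = flag ? 1 : 2.0;
        System.out.println(result); // 1.0, not 1: the int branch was widened
    }
}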
-90,7 +90,7 @@ void analyzeNot(Locals variables) { void analyzeBWNot(Locals variables) { child.analyze(variables); - promote = variables.getDefinition().caster.promoteNumeric(child.actual, false); + promote = variables.getDefinition().ClassToType(AnalyzerCaster.promoteNumeric(Definition.TypeToClass(child.actual), false)); if (promote == null) { throw createError(new ClassCastException("Cannot apply not [~] to type [" + child.actual.name + "].")); @@ -121,7 +121,7 @@ void analyzeBWNot(Locals variables) { void analyzerAdd(Locals variables) { child.analyze(variables); - promote = variables.getDefinition().caster.promoteNumeric(child.actual, true); + promote = variables.getDefinition().ClassToType(AnalyzerCaster.promoteNumeric(Definition.TypeToClass(child.actual), true)); if (promote == null) { throw createError(new ClassCastException("Cannot apply positive [+] to type [" + child.actual.name + "].")); @@ -156,7 +156,7 @@ void analyzerAdd(Locals variables) { void analyzerSub(Locals variables) { child.analyze(variables); - promote = variables.getDefinition().caster.promoteNumeric(child.actual, true); + promote = variables.getDefinition().ClassToType(AnalyzerCaster.promoteNumeric(Definition.TypeToClass(child.actual), true)); if (promote == null) { throw createError(new ClassCastException("Cannot apply negative [-] to type [" + child.actual.name + "].")); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachArray.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachArray.java index 09c73c525bec0..a4c2eb8cd22cf 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachArray.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachArray.java @@ -64,13 +64,10 @@ void extractVariables(Set variables) { void analyze(Locals locals) { // We must store the array and index as variables for securing slots on the stack, and // also add the location offset to make the names unique in case of nested for each loops. 
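SSubEachArray above desugars a for-each over an array into synthetic #array and #index locals plus a per-element cast to the loop variable's type. Roughly the following, with the # names spelled out since they are not legal Java identifiers:

final class ForEachArraySketch {
    public static void main(String[] args) {
        Object[] values = { 1, 2, 3 };
        // "for (int v : values)" over a def array desugars roughly to:
        Object[] array = values;                              // synthetic "#array" local
        for (int index = 0; index < array.length; ++index) {  // synthetic "#index" local
            int v = (Integer) array[index]; // per-element cast to the loop variable type
            System.out.println(v);
        }
    }
}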
- array = locals.addVariable(location, expression.actual, "#array" + location.getOffset(), - true); - index = locals.addVariable(location, locals.getDefinition().intType, "#index" + location.getOffset(), - true); - indexed = locals.getDefinition().getType(expression.actual.struct, - expression.actual.dimensions - 1); - cast = locals.getDefinition().caster.getLegalCast(location, indexed, variable.type, true, true); + array = locals.addVariable(location, expression.actual, "#array" + location.getOffset(), true); + index = locals.addVariable(location, locals.getDefinition().intType, "#index" + location.getOffset(), true); + indexed = locals.getDefinition().getType(expression.actual.struct, expression.actual.dimensions - 1); + cast = AnalyzerCaster.getLegalCast(location, Definition.TypeToClass(indexed), Definition.TypeToClass(variable.type), true, true); } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachIterable.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachIterable.java index a51a459f0f3f8..26fb4a2f8459a 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachIterable.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachIterable.java @@ -25,6 +25,7 @@ import org.elasticsearch.painless.Definition.Cast; import org.elasticsearch.painless.Definition.Method; import org.elasticsearch.painless.Definition.MethodKey; +import org.elasticsearch.painless.Definition.def; import org.elasticsearch.painless.Globals; import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Locals.Variable; @@ -85,7 +86,7 @@ void analyze(Locals locals) { } } - cast = locals.getDefinition().caster.getLegalCast(location, locals.getDefinition().DefType, variable.type, true, true); + cast = AnalyzerCaster.getLegalCast(location, def.class, Definition.TypeToClass(variable.type), true, true); } @Override diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/AnalyzerCasterTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/AnalyzerCasterTests.java index 919b0881c0794..69abc3481a188 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/AnalyzerCasterTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/AnalyzerCasterTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.painless; import org.elasticsearch.painless.Definition.Cast; -import org.elasticsearch.painless.Definition.Type; import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.test.ESTestCase; @@ -28,73 +27,73 @@ public class AnalyzerCasterTests extends ESTestCase { private static final Definition definition = new Definition(Whitelist.BASE_WHITELISTS); - private static void assertCast(Type actual, Type expected, boolean mustBeExplicit) { + private static void assertCast(Class actual, Class expected, boolean mustBeExplicit) { Location location = new Location("dummy", 0); if (actual.equals(expected)) { assertFalse(mustBeExplicit); - assertNull(definition.caster.getLegalCast(location, actual, expected, false, false)); - assertNull(definition.caster.getLegalCast(location, actual, expected, true, false)); + assertNull(AnalyzerCaster.getLegalCast(location, actual, expected, false, false)); + assertNull(AnalyzerCaster.getLegalCast(location, actual, expected, true, false)); return; } - Cast cast = definition.caster.getLegalCast(location, actual, expected, true, false); + Cast cast = 
AnalyzerCaster.getLegalCast(location, actual, expected, true, false); assertEquals(actual, cast.from); assertEquals(expected, cast.to); if (mustBeExplicit) { ClassCastException error = expectThrows(ClassCastException.class, - () -> definition.caster.getLegalCast(location, actual, expected, false, false)); + () -> AnalyzerCaster.getLegalCast(location, actual, expected, false, false)); assertTrue(error.getMessage().startsWith("Cannot cast")); } else { - cast = definition.caster.getLegalCast(location, actual, expected, false, false); + cast = AnalyzerCaster.getLegalCast(location, actual, expected, false, false); assertEquals(actual, cast.from); assertEquals(expected, cast.to); } } public void testNumericCasts() { - assertCast(definition.byteType, definition.byteType, false); - assertCast(definition.byteType, definition.shortType, false); - assertCast(definition.byteType, definition.intType, false); - assertCast(definition.byteType, definition.longType, false); - assertCast(definition.byteType, definition.floatType, false); - assertCast(definition.byteType, definition.doubleType, false); + assertCast(byte.class, byte.class, false); + assertCast(byte.class, short.class, false); + assertCast(byte.class, int.class, false); + assertCast(byte.class, long.class, false); + assertCast(byte.class, float.class, false); + assertCast(byte.class, double.class, false); - assertCast(definition.shortType, definition.byteType, true); - assertCast(definition.shortType, definition.shortType, false); - assertCast(definition.shortType, definition.intType, false); - assertCast(definition.shortType, definition.longType, false); - assertCast(definition.shortType, definition.floatType, false); - assertCast(definition.shortType, definition.doubleType, false); + assertCast(short.class, byte.class, true); + assertCast(short.class, short.class, false); + assertCast(short.class, int.class, false); + assertCast(short.class, long.class, false); + assertCast(short.class, float.class, false); + assertCast(short.class, double.class, false); - assertCast(definition.intType, definition.byteType, true); - assertCast(definition.intType, definition.shortType, true); - assertCast(definition.intType, definition.intType, false); - assertCast(definition.intType, definition.longType, false); - assertCast(definition.intType, definition.floatType, false); - assertCast(definition.intType, definition.doubleType, false); + assertCast(int.class, byte.class, true); + assertCast(int.class, short.class, true); + assertCast(int.class, int.class, false); + assertCast(int.class, long.class, false); + assertCast(int.class, float.class, false); + assertCast(int.class, double.class, false); - assertCast(definition.longType, definition.byteType, true); - assertCast(definition.longType, definition.shortType, true); - assertCast(definition.longType, definition.intType, true); - assertCast(definition.longType, definition.longType, false); - assertCast(definition.longType, definition.floatType, false); - assertCast(definition.longType, definition.doubleType, false); + assertCast(long.class, byte.class, true); + assertCast(long.class, short.class, true); + assertCast(long.class, int.class, true); + assertCast(long.class, long.class, false); + assertCast(long.class, float.class, false); + assertCast(long.class, double.class, false); - assertCast(definition.floatType, definition.byteType, true); - assertCast(definition.floatType, definition.shortType, true); - assertCast(definition.floatType, definition.intType, true); - assertCast(definition.floatType, 
definition.longType, true); - assertCast(definition.floatType, definition.floatType, false); - assertCast(definition.floatType, definition.doubleType, false); + assertCast(float.class, byte.class, true); + assertCast(float.class, short.class, true); + assertCast(float.class, int.class, true); + assertCast(float.class, long.class, true); + assertCast(float.class, float.class, false); + assertCast(float.class, double.class, false); - assertCast(definition.doubleType, definition.byteType, true); - assertCast(definition.doubleType, definition.shortType, true); - assertCast(definition.doubleType, definition.intType, true); - assertCast(definition.doubleType, definition.longType, true); - assertCast(definition.doubleType, definition.floatType, true); - assertCast(definition.doubleType, definition.doubleType, false); + assertCast(double.class, byte.class, true); + assertCast(double.class, short.class, true); + assertCast(double.class, int.class, true); + assertCast(double.class, long.class, true); + assertCast(double.class, float.class, true); + assertCast(double.class, double.class, false); } } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java index 424b0c286ecff..fb1a004e3cd40 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java @@ -162,12 +162,12 @@ public void testECapturingFunctionRef() { public void testECast() { Location l = new Location(getTestName(), 0); AExpression child = new EConstant(l, "test"); - Cast cast = Cast.standard(definition.StringType, definition.IntegerType, true); + Cast cast = Cast.standard(String.class, Integer.class, true); assertEquals("(ECast java.lang.Integer (EConstant String 'test'))", new ECast(l, child, cast).toString()); l = new Location(getTestName(), 1); child = new EBinary(l, Operation.ADD, new EConstant(l, "test"), new EConstant(l, 12)); - cast = Cast.standard(definition.IntegerType, definition.BooleanType, true); + cast = Cast.standard(Integer.class, Boolean.class, true); assertEquals("(ECast java.lang.Boolean (EBinary (EConstant String 'test') + (EConstant Integer 12)))", new ECast(l, child, cast).toString()); } diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/15_update.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/15_update.yml index 0e319be97bf0b..20047e7d4825d 100644 --- a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/15_update.yml +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/15_update.yml @@ -137,4 +137,4 @@ - match: { error.root_cause.0.type: "remote_transport_exception" } - match: { error.type: "illegal_argument_exception" } - - match: { error.reason: "Object has already been built and is self-referencing itself" } + - match: { error.reason: "Iterable object is self-referencing itself" } diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/30_search.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/30_search.yml index 28679cb223fd1..b7be116b38695 100644 --- a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/30_search.yml +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/30_search.yml @@ -406,3 +406,39 @@ - match: { hits.hits.0._score: 1.0 } - match: 
{ aggregations.value_agg.buckets.0.key: 2 }
   - match: { aggregations.value_agg.buckets.0.doc_count: 1 }
+
+---
+"Return self-referencing map":
+  - do:
+      indices.create:
+        index: test
+        body:
+          settings:
+            number_of_shards: "1"
+
+  - do:
+      index:
+        index: test
+        type: test
+        id: 1
+        body: { "genre": 1 }
+
+  - do:
+      indices.refresh: {}
+
+  - do:
+      catch: bad_request
+      search:
+        index: test
+        body:
+          aggs:
+            genre:
+              terms:
+                script:
+                  lang: painless
+                  source: "def x = [:] ; def y = [:] ; x.a = y ; y.a = x ; return x"
+
+  - match: { error.root_cause.0.type: "illegal_argument_exception" }
+  - match: { error.root_cause.0.reason: "Iterable object is self-referencing itself" }
+  - match: { error.type: "search_phase_execution_exception" }
+  - match: { error.reason: "all shards failed" }
diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java
index 96ec29e2aa695..5770c91cfdb7e 100644
--- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java
+++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java
@@ -207,8 +207,8 @@ public String typeName() {
         }
 
         @Override
-        public void checkCompatibility(MappedFieldType other, List<String> conflicts, boolean strict) {
-            super.checkCompatibility(other, conflicts, strict);
+        public void checkCompatibility(MappedFieldType other, List<String> conflicts) {
+            super.checkCompatibility(other, conflicts);
             if (scalingFactor != ((ScaledFloatFieldType) other).getScalingFactor()) {
                 conflicts.add("mapper [" + name() + "] has different [scaling_factor] values");
             }
@@ -424,8 +424,8 @@ protected void parseCreateField(ParseContext context, List<IndexableField> fields
     }
 
     @Override
-    protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
-        super.doMerge(mergeWith, updateAllTypes);
+    protected void doMerge(Mapper mergeWith) {
+        super.doMerge(mergeWith);
         ScaledFloatFieldMapper other = (ScaledFloatFieldMapper) mergeWith;
         if (other.ignoreMalformed.explicit()) {
             this.ignoreMalformed = other.ignoreMalformed;
diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/TokenCountFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/TokenCountFieldMapper.java
index c18b66cf61855..7a777963baa4e 100644
--- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/TokenCountFieldMapper.java
+++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/TokenCountFieldMapper.java
@@ -202,8 +202,8 @@ protected String contentType() {
     }
 
     @Override
-    protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
-        super.doMerge(mergeWith, updateAllTypes);
+    protected void doMerge(Mapper mergeWith) {
+        super.doMerge(mergeWith);
         this.analyzer = ((TokenCountFieldMapper) mergeWith).analyzer;
         this.enablePositionIncrements = ((TokenCountFieldMapper) mergeWith).enablePositionIncrements;
     }
diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperTests.java
index 633f10276096c..13c4e87f95efc 100644
--- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperTests.java
+++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperTests.java
@@ -63,7 +63,7 @@ public void testMerge() throws IOException {
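The new YAML test expects a script returning a self-referencing map to fail fast with "Iterable object is self-referencing itself" instead of recursing forever during serialization. A minimal sketch of such cycle detection with an identity set (hand-rolled; the actual check lives in Elasticsearch's collection utilities):

import java.util.Collections;
import java.util.HashMap;
import java.util.IdentityHashMap;
import java.util.Map;
import java.util.Set;

final class SelfReferenceSketch {
    // Walk nested maps, tracking the current path by object identity;
    // revisiting an ancestor means the structure is cyclic.
    static void ensureNoSelfReferences(Object value, Set<Object> path) {
        if (value instanceof Map) {
            Map<?, ?> map = (Map<?, ?>) value;
            if (path.add(map) == false) {
                throw new IllegalArgumentException("Iterable object is self-referencing itself");
            }
            for (Object v : map.values()) {
                ensureNoSelfReferences(v, path);
            }
            path.remove(map);
        }
    }

    public static void main(String[] args) {
        Map<String, Object> x = new HashMap<>();
        Map<String, Object> y = new HashMap<>();
        x.put("a", y);
        y.put("a", x); // x -> y -> x, like the script in the test above
        try {
            ensureNoSelfReferences(x, Collections.newSetFromMap(new IdentityHashMap<>()));
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // Iterable object is self-referencing itself
        }
    }
}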
.endObject().endObject().string(); MapperService mapperService = createIndex("test").mapperService(); DocumentMapper stage1 = mapperService.merge("person", - new CompressedXContent(stage1Mapping), MapperService.MergeReason.MAPPING_UPDATE, false); + new CompressedXContent(stage1Mapping), MapperService.MergeReason.MAPPING_UPDATE); String stage2Mapping = XContentFactory.jsonBuilder().startObject() .startObject("person") @@ -75,7 +75,7 @@ public void testMerge() throws IOException { .endObject() .endObject().endObject().string(); DocumentMapper stage2 = mapperService.merge("person", - new CompressedXContent(stage2Mapping), MapperService.MergeReason.MAPPING_UPDATE, false); + new CompressedXContent(stage2Mapping), MapperService.MergeReason.MAPPING_UPDATE); // previous mapper has not been modified assertThat(((TokenCountFieldMapper) stage1.mappers().smartNameFieldMapper("tc")).analyzer(), equalTo("keyword")); diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentIdFieldMapper.java b/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentIdFieldMapper.java index 21078c2763f1c..8130acac1af72 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentIdFieldMapper.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentIdFieldMapper.java @@ -194,8 +194,8 @@ protected void parseCreateField(ParseContext context, List field @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { - super.doMerge(mergeWith, updateAllTypes); + protected void doMerge(Mapper mergeWith) { + super.doMerge(mergeWith); ParentIdFieldMapper parentMergeWith = (ParentIdFieldMapper) mergeWith; this.children = parentMergeWith.children; } diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java b/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java index b2ec28cf0c86b..d3164ae6a12da 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java @@ -316,8 +316,8 @@ public ParentIdFieldMapper getParentIdFieldMapper(String name, boolean isParent) } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { - super.doMerge(mergeWith, updateAllTypes); + protected void doMerge(Mapper mergeWith) { + super.doMerge(mergeWith); ParentJoinFieldMapper joinMergeWith = (ParentJoinFieldMapper) mergeWith; List conflicts = new ArrayList<>(); for (ParentIdFieldMapper mapper : parentIdFields) { @@ -347,7 +347,7 @@ protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { conflicts.add("cannot remove child [" + child + "] in join field [" + name() + "]"); } } - ParentIdFieldMapper merged = (ParentIdFieldMapper) self.merge(mergeWithMapper, updateAllTypes); + ParentIdFieldMapper merged = (ParentIdFieldMapper) self.merge(mergeWithMapper); newParentIdFields.add(merged); } } @@ -356,7 +356,7 @@ protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { } this.eagerGlobalOrdinals = joinMergeWith.eagerGlobalOrdinals; this.parentIdFields = Collections.unmodifiableList(newParentIdFields); - this.uniqueFieldMapper = (MetaJoinFieldMapper) uniqueFieldMapper.merge(joinMergeWith.uniqueFieldMapper, updateAllTypes); + this.uniqueFieldMapper = (MetaJoinFieldMapper) uniqueFieldMapper.merge(joinMergeWith.uniqueFieldMapper); uniqueFieldMapper.setFieldMapper(this); } diff --git 
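With the updateAllTypes flag removed, doMerge implementations simply absorb the incoming mapper's explicit settings, as in the mapper diffs above. The general shape, using a hypothetical minimal mapper rather than the Elasticsearch classes:

final class DoMergeSketch {
    static class MyMapper {
        String analyzer;

        // Mirror of the post-refactor doMerge(Mapper mergeWith) contract:
        // take only the incoming mapper and copy over its settings.
        void doMerge(MyMapper mergeWith) {
            this.analyzer = mergeWith.analyzer;
        }
    }

    public static void main(String[] args) {
        MyMapper current = new MyMapper();
        current.analyzer = "keyword";
        MyMapper update = new MyMapper();
        update.analyzer = "standard";
        current.doMerge(update);
        System.out.println(current.analyzer); // standard
    }
}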
a/modules/parent-join/src/test/java/org/elasticsearch/join/mapper/ParentJoinFieldMapperTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/mapper/ParentJoinFieldMapperTests.java index 6bfc9b87b78ce..285e7e80195af 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/mapper/ParentJoinFieldMapperTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/mapper/ParentJoinFieldMapperTests.java @@ -57,7 +57,7 @@ public void testSingleLevel() throws Exception { .endObject().string(); IndexService service = createIndex("test"); DocumentMapper docMapper = service.mapperService().merge("type", new CompressedXContent(mapping), - MapperService.MergeReason.MAPPING_UPDATE, false); + MapperService.MergeReason.MAPPING_UPDATE); assertTrue(docMapper.mappers().getMapper("join_field") == ParentJoinFieldMapper.getMapper(service.mapperService())); // Doc without join @@ -106,7 +106,7 @@ public void testParentIdSpecifiedAsNumber() throws Exception { .endObject().string(); IndexService service = createIndex("test"); DocumentMapper docMapper = service.mapperService().merge("type", new CompressedXContent(mapping), - MapperService.MergeReason.MAPPING_UPDATE, false); + MapperService.MergeReason.MAPPING_UPDATE); ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "2", XContentFactory.jsonBuilder().startObject() .startObject("join_field") @@ -141,7 +141,7 @@ public void testMultipleLevels() throws Exception { .endObject().string(); IndexService service = createIndex("test"); DocumentMapper docMapper = service.mapperService().merge("type", new CompressedXContent(mapping), - MapperService.MergeReason.MAPPING_UPDATE, false); + MapperService.MergeReason.MAPPING_UPDATE); assertTrue(docMapper.mappers().getMapper("join_field") == ParentJoinFieldMapper.getMapper(service.mapperService())); // Doc without join @@ -221,7 +221,7 @@ public void testUpdateRelations() throws Exception { .endObject().endObject().string(); IndexService indexService = createIndex("test"); DocumentMapper docMapper = indexService.mapperService().merge("type", new CompressedXContent(mapping), - MapperService.MergeReason.MAPPING_UPDATE, false); + MapperService.MergeReason.MAPPING_UPDATE); assertTrue(docMapper.mappers().getMapper("join_field") == ParentJoinFieldMapper.getMapper(indexService.mapperService())); { @@ -235,7 +235,7 @@ public void testUpdateRelations() throws Exception { .endObject().endObject().string(); IllegalStateException exc = expectThrows(IllegalStateException.class, () -> indexService.mapperService().merge("type", new CompressedXContent(updateMapping), - MapperService.MergeReason.MAPPING_UPDATE, false)); + MapperService.MergeReason.MAPPING_UPDATE)); assertThat(exc.getMessage(), containsString("cannot remove parent [parent] in join field [join_field]")); } @@ -251,7 +251,7 @@ public void testUpdateRelations() throws Exception { .endObject().endObject().string(); IllegalStateException exc = expectThrows(IllegalStateException.class, () -> indexService.mapperService().merge("type", new CompressedXContent(updateMapping), - MapperService.MergeReason.MAPPING_UPDATE, false)); + MapperService.MergeReason.MAPPING_UPDATE)); assertThat(exc.getMessage(), containsString("cannot remove child [grand_child2] in join field [join_field]")); } @@ -268,7 +268,7 @@ public void testUpdateRelations() throws Exception { .endObject().endObject().string(); IllegalStateException exc = expectThrows(IllegalStateException.class, () -> indexService.mapperService().merge("type", new 
CompressedXContent(updateMapping), - MapperService.MergeReason.MAPPING_UPDATE, false)); + MapperService.MergeReason.MAPPING_UPDATE)); assertThat(exc.getMessage(), containsString("cannot create child [parent] from an existing parent")); } @@ -285,7 +285,7 @@ public void testUpdateRelations() throws Exception { .endObject().endObject().string(); IllegalStateException exc = expectThrows(IllegalStateException.class, () -> indexService.mapperService().merge("type", new CompressedXContent(updateMapping), - MapperService.MergeReason.MAPPING_UPDATE, false)); + MapperService.MergeReason.MAPPING_UPDATE)); assertThat(exc.getMessage(), containsString("cannot create parent [grand_child2] from an existing child]")); } @@ -300,7 +300,7 @@ public void testUpdateRelations() throws Exception { .endObject() .endObject().endObject().string(); docMapper = indexService.mapperService().merge("type", new CompressedXContent(updateMapping), - MapperService.MergeReason.MAPPING_UPDATE, true); + MapperService.MergeReason.MAPPING_UPDATE); assertTrue(docMapper.mappers().getMapper("join_field") == ParentJoinFieldMapper.getMapper(indexService.mapperService())); ParentJoinFieldMapper mapper = ParentJoinFieldMapper.getMapper(indexService.mapperService()); assertTrue(mapper.hasChild("child2")); @@ -321,7 +321,7 @@ public void testUpdateRelations() throws Exception { .endObject() .endObject().endObject().string(); docMapper = indexService.mapperService().merge("type", new CompressedXContent(updateMapping), - MapperService.MergeReason.MAPPING_UPDATE, true); + MapperService.MergeReason.MAPPING_UPDATE); assertTrue(docMapper.mappers().getMapper("join_field") == ParentJoinFieldMapper.getMapper(indexService.mapperService())); ParentJoinFieldMapper mapper = ParentJoinFieldMapper.getMapper(indexService.mapperService()); assertTrue(mapper.hasParent("other")); @@ -349,7 +349,7 @@ public void testInvalidJoinFieldInsideObject() throws Exception { IndexService indexService = createIndex("test"); MapperParsingException exc = expectThrows(MapperParsingException.class, () -> indexService.mapperService().merge("type", new CompressedXContent(mapping), - MapperService.MergeReason.MAPPING_UPDATE, false)); + MapperService.MergeReason.MAPPING_UPDATE)); assertThat(exc.getRootCause().getMessage(), containsString("join field [object.join_field] cannot be added inside an object or in a multi-field")); } @@ -371,7 +371,7 @@ public void testInvalidJoinFieldInsideMultiFields() throws Exception { IndexService indexService = createIndex("test"); MapperParsingException exc = expectThrows(MapperParsingException.class, () -> indexService.mapperService().merge("type", new CompressedXContent(mapping), - MapperService.MergeReason.MAPPING_UPDATE, false)); + MapperService.MergeReason.MAPPING_UPDATE)); assertThat(exc.getRootCause().getMessage(), containsString("join field [number.join_field] cannot be added inside an object or in a multi-field")); } @@ -397,7 +397,7 @@ public void testMultipleJoinFields() throws Exception { .endObject() .endObject().string(); IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> indexService.mapperService().merge("type", - new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, false)); + new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE)); assertThat(exc.getMessage(), containsString("Field [_parent_join] is defined twice in [type]")); } @@ -414,7 +414,7 @@ public void testMultipleJoinFields() throws Exception { .endObject() .endObject().string(); 
indexService.mapperService().merge("type", - new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, false); + new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); String updateMapping = XContentFactory.jsonBuilder().startObject() .startObject("properties") .startObject("another_join_field") @@ -423,7 +423,7 @@ public void testMultipleJoinFields() throws Exception { .endObject() .endObject().string(); IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> indexService.mapperService().merge("type", - new CompressedXContent(updateMapping), MapperService.MergeReason.MAPPING_UPDATE, false)); + new CompressedXContent(updateMapping), MapperService.MergeReason.MAPPING_UPDATE)); assertThat(exc.getMessage(), containsString("Field [_parent_join] is defined twice in [type]")); } } @@ -442,7 +442,7 @@ public void testEagerGlobalOrdinals() throws Exception { .endObject().string(); IndexService service = createIndex("test"); DocumentMapper docMapper = service.mapperService().merge("type", new CompressedXContent(mapping), - MapperService.MergeReason.MAPPING_UPDATE, false); + MapperService.MergeReason.MAPPING_UPDATE); assertTrue(docMapper.mappers().getMapper("join_field") == ParentJoinFieldMapper.getMapper(service.mapperService())); assertFalse(service.mapperService().fullName("join_field").eagerGlobalOrdinals()); assertNotNull(service.mapperService().fullName("join_field#parent")); @@ -463,7 +463,7 @@ public void testEagerGlobalOrdinals() throws Exception { .endObject() .endObject().string(); service.mapperService().merge("type", new CompressedXContent(mapping), - MapperService.MergeReason.MAPPING_UPDATE, false); + MapperService.MergeReason.MAPPING_UPDATE); assertFalse(service.mapperService().fullName("join_field").eagerGlobalOrdinals()); assertNotNull(service.mapperService().fullName("join_field#parent")); assertFalse(service.mapperService().fullName("join_field#parent").eagerGlobalOrdinals()); diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java index 885c19c6cd45a..0ec6bec977e2e 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java @@ -132,7 +132,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws .endObject().endObject().endObject(); mapperService.merge(TYPE, - new CompressedXContent(mapping.string()), MapperService.MergeReason.MAPPING_UPDATE, false); + new CompressedXContent(mapping.string()), MapperService.MergeReason.MAPPING_UPDATE); } /** diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java index 1776df49e1850..67b0051358b17 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java @@ -112,7 +112,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws .endObject().endObject().endObject(); mapperService.merge(TYPE, - new CompressedXContent(mapping.string()), MapperService.MergeReason.MAPPING_UPDATE, false); + new CompressedXContent(mapping.string()), 
MapperService.MergeReason.MAPPING_UPDATE); } /** diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasChildQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasChildQueryBuilderTests.java index d30ddf98661de..a0883d5090adb 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasChildQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasChildQueryBuilderTests.java @@ -97,7 +97,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws BOOLEAN_FIELD_NAME, "type=boolean", DATE_FIELD_NAME, "type=date", OBJECT_FIELD_NAME, "type=object" - ).string()), MapperService.MergeReason.MAPPING_UPDATE, false); + ).string()), MapperService.MergeReason.MAPPING_UPDATE); mapperService.merge(CHILD_TYPE, new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(CHILD_TYPE, "_parent", "type=" + PARENT_TYPE, STRING_FIELD_NAME, "type=text", @@ -107,7 +107,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws BOOLEAN_FIELD_NAME, "type=boolean", DATE_FIELD_NAME, "type=date", OBJECT_FIELD_NAME, "type=object" - ).string()), MapperService.MergeReason.MAPPING_UPDATE, false); + ).string()), MapperService.MergeReason.MAPPING_UPDATE); } @Override diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasParentQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasParentQueryBuilderTests.java index 8517348721e30..bd2c816b56566 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasParentQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasParentQueryBuilderTests.java @@ -88,7 +88,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws BOOLEAN_FIELD_NAME, "type=boolean", DATE_FIELD_NAME, "type=date", OBJECT_FIELD_NAME, "type=object" - ).string()), MapperService.MergeReason.MAPPING_UPDATE, false); + ).string()), MapperService.MergeReason.MAPPING_UPDATE); mapperService.merge(CHILD_TYPE, new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(CHILD_TYPE, "_parent", "type=" + PARENT_TYPE, STRING_FIELD_NAME, "type=text", @@ -98,9 +98,9 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws BOOLEAN_FIELD_NAME, "type=boolean", DATE_FIELD_NAME, "type=date", OBJECT_FIELD_NAME, "type=object" - ).string()), MapperService.MergeReason.MAPPING_UPDATE, false); + ).string()), MapperService.MergeReason.MAPPING_UPDATE); mapperService.merge("just_a_type", new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef("just_a_type" - ).string()), MapperService.MergeReason.MAPPING_UPDATE, false); + ).string()), MapperService.MergeReason.MAPPING_UPDATE); } /** diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyParentIdQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyParentIdQueryBuilderTests.java index f613e58498ace..d88f5b944c32d 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyParentIdQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyParentIdQueryBuilderTests.java @@ -72,7 +72,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws BOOLEAN_FIELD_NAME, "type=boolean", DATE_FIELD_NAME, "type=date", OBJECT_FIELD_NAME, "type=object" - 
diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyParentIdQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyParentIdQueryBuilderTests.java
index f613e58498ace..d88f5b944c32d 100644
--- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyParentIdQueryBuilderTests.java
+++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyParentIdQueryBuilderTests.java
@@ -72,7 +72,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws
                BOOLEAN_FIELD_NAME, "type=boolean",
                DATE_FIELD_NAME, "type=date",
                OBJECT_FIELD_NAME, "type=object"
-            ).string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+            ).string()), MapperService.MergeReason.MAPPING_UPDATE);
        mapperService.merge(CHILD_TYPE, new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(CHILD_TYPE,
                "_parent", "type=" + PARENT_TYPE,
                STRING_FIELD_NAME, "type=text",
@@ -81,7 +81,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws
                BOOLEAN_FIELD_NAME, "type=boolean",
                DATE_FIELD_NAME, "type=date",
                OBJECT_FIELD_NAME, "type=object"
-            ).string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+            ).string()), MapperService.MergeReason.MAPPING_UPDATE);
    }

    @Override
diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentIdQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentIdQueryBuilderTests.java
index 375923ebd9ab2..7c6dea967f344 100644
--- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentIdQueryBuilderTests.java
+++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentIdQueryBuilderTests.java
@@ -104,7 +104,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws
            .endObject().endObject().endObject();
        mapperService.merge(TYPE,
-            new CompressedXContent(mapping.string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+            new CompressedXContent(mapping.string()), MapperService.MergeReason.MAPPING_UPDATE);
    }

    @Override
diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java
index 93b2eb9d5550f..38844b2352b6e 100644
--- a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java
+++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java
@@ -136,13 +136,13 @@ public void init() throws Exception {
                .startObject("ip_field").field("type", "ip").endObject()
                .startObject("field").field("type", "keyword").endObject()
                .endObject().endObject().endObject().string();
-        documentMapper = mapperService.merge("type", new CompressedXContent(mapper), MapperService.MergeReason.MAPPING_UPDATE, true);
+        documentMapper = mapperService.merge("type", new CompressedXContent(mapper), MapperService.MergeReason.MAPPING_UPDATE);

        String queryField = "query_field";
        String percolatorMapper = XContentFactory.jsonBuilder().startObject().startObject("type")
                .startObject("properties").startObject(queryField).field("type", "percolator").endObject().endObject()
                .endObject().endObject().string();
-        mapperService.merge("type", new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, true);
+        mapperService.merge("type", new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE);
        fieldMapper = (PercolatorFieldMapper) mapperService.documentMapper("type").mappers().getMapper(queryField);
        fieldType = (PercolatorFieldMapper.FieldType) fieldMapper.fieldType();

diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java
index 122cabc79eab6..428a10b809d68 100644
--- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java
+++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java
@@ -98,10 +98,10 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws
        String docType = "_doc";
        mapperService.merge(docType, new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(docType,
                queryField, "type=percolator"
-        ).string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+        ).string()), MapperService.MergeReason.MAPPING_UPDATE);
        mapperService.merge(docType, new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(docType,
                STRING_FIELD_NAME, "type=text"
-        ).string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+        ).string()), MapperService.MergeReason.MAPPING_UPDATE);
        if (mapperService.getIndexSettings().isSingleType() == false) {
            PercolateQueryBuilderTests.docType = docType;
        }
diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java
index c8ac4212258a3..65cf23f8d6026 100644
--- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java
+++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java
@@ -156,7 +156,7 @@ public void init() throws Exception {
                .startObject("number_field7").field("type", "ip").endObject()
                .startObject("date_field").field("type", "date").endObject()
                .endObject().endObject().endObject().string();
-        mapperService.merge("doc", new CompressedXContent(mapper), MapperService.MergeReason.MAPPING_UPDATE, false);
+        mapperService.merge("doc", new CompressedXContent(mapper), MapperService.MergeReason.MAPPING_UPDATE);
    }

    private void addQueryFieldMappings() throws Exception {
@@ -164,7 +164,7 @@ private void addQueryFieldMappings() throws Exception {
        String percolatorMapper = XContentFactory.jsonBuilder().startObject().startObject("doc")
                .startObject("properties").startObject(fieldName).field("type", "percolator").endObject().endObject()
                .endObject().endObject().string();
-        mapperService.merge("doc", new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, false);
+        mapperService.merge("doc", new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE);
        fieldType = (PercolatorFieldMapper.FieldType) mapperService.fullName(fieldName);
    }

@@ -578,7 +578,7 @@ public void testAllowNoAdditionalSettings() throws Exception {
                .startObject("properties").startObject(fieldName).field("type", "percolator").field("index", "no").endObject().endObject()
                .endObject().endObject().string();
        MapperParsingException e = expectThrows(MapperParsingException.class, () ->
-            mapperService.merge("doc", new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, true));
+            mapperService.merge("doc", new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE));
        assertThat(e.getMessage(), containsString("Mapping definition for [" + fieldName + "] has unsupported parameters: [index : no]"));
    }

@@ -592,7 +592,7 @@ public void testMultiplePercolatorFields() throws Exception {
                        .startObject("query_field2").field("type", "percolator").endObject()
                    .endObject()
                .endObject().endObject().string();
-        mapperService.merge(typeName, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, true);
+        mapperService.merge(typeName, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE);

        QueryBuilder queryBuilder = matchQuery("field", "value");
        ParsedDocument doc = mapperService.documentMapper(typeName).parse(SourceToParse.source("test", typeName, "1",
@@ -623,7 +623,7 @@ public void testNestedPercolatorField() throws Exception {
                    .endObject()
                .endObject()
                .endObject().endObject().string();
-        mapperService.merge(typeName, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, true);
+        mapperService.merge(typeName, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE);

        QueryBuilder queryBuilder = matchQuery("field", "value");
        ParsedDocument doc = mapperService.documentMapper(typeName).parse(SourceToParse.source("test", typeName, "1",
diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalResponse.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalResponse.java
index 30ffaeff18b92..e8fe182726825 100644
--- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalResponse.java
+++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalResponse.java
@@ -112,7 +112,6 @@ public void readFrom(StreamInput in) throws IOException {
    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
-        builder.startObject("rank_eval");
        builder.field("quality_level", evaluationResult);
        builder.startObject("details");
        for (String key : details.keySet()) {
@@ -127,7 +126,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
        }
        builder.endObject();
        builder.endObject();
-        builder.endObject();
        return builder;
    }
 }
diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalResponseTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalResponseTests.java
index 827f7be4442e8..881b9e04709a7 100644
--- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalResponseTests.java
+++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalResponseTests.java
@@ -92,23 +92,21 @@ public void testToXContent() throws IOException {
        XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
        String xContent = response.toXContent(builder, ToXContent.EMPTY_PARAMS).bytes().utf8ToString();
        assertEquals(("{" +
-                "  \"rank_eval\": {" +
-                "    \"quality_level\": 0.123," +
-                "    \"details\": {" +
-                "      \"coffee_query\": {" +
-                "        \"quality_level\": 0.1," +
-                "        \"unknown_docs\": [{\"_index\":\"index\",\"_id\":\"456\"}]," +
-                "        \"hits\":[{\"hit\":{\"_index\":\"index\",\"_type\":\"\",\"_id\":\"123\",\"_score\":1.0}," +
-                "        \"rating\":5}," +
-                "        {\"hit\":{\"_index\":\"index\",\"_type\":\"\",\"_id\":\"456\",\"_score\":1.0}," +
-                "        \"rating\":null}" +
-                "        ]" +
-                "      }" +
-                "    }," +
-                "    \"failures\": {" +
-                "      \"beer_query\": {" +
-                "        \"error\": \"ParsingException[someMsg]\"" +
-                "      }" +
+                "  \"quality_level\": 0.123," +
+                "  \"details\": {" +
+                "    \"coffee_query\": {" +
+                "      \"quality_level\": 0.1," +
+                "      \"unknown_docs\": [{\"_index\":\"index\",\"_id\":\"456\"}]," +
+                "      \"hits\":[{\"hit\":{\"_index\":\"index\",\"_type\":\"\",\"_id\":\"123\",\"_score\":1.0}," +
+                "      \"rating\":5}," +
+                "      {\"hit\":{\"_index\":\"index\",\"_type\":\"\",\"_id\":\"456\",\"_score\":1.0}," +
+                "      \"rating\":null}" +
+                "      ]" +
+                "    }" +
+                "  }," +
+                "  \"failures\": {" +
+                "    \"beer_query\": {" +
+                "      \"error\": \"ParsingException[someMsg]\"" +
                "    }" +
                "  }" +
                "}").replaceAll("\\s+", ""), xContent);
diff --git a/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/10_basic.yml b/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/10_basic.yml
index a81df5fa3fafd..4a244dcb9e5e9 100644
--- a/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/10_basic.yml
+++ b/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/10_basic.yml
@@ -2,8 +2,8 @@
 "Response format":

   - skip:
-      version: " - 6.99.99"
-      reason: the ranking evaluation feature is only available on 7.0
+      version: " - 6.1.99"
+      reason: the ranking evaluation feature is available since 6.2

   - do:
       indices.create:
@@ -64,27 +64,27 @@
             "metric" : { "precision": { "ignore_unlabeled" : true }}
         }

-  - match: { rank_eval.quality_level: 1}
-  - match: { rank_eval.details.amsterdam_query.quality_level: 1.0}
-  - match: { rank_eval.details.amsterdam_query.unknown_docs: [ {"_index": "foo", "_id": "doc4"}]}
-  - match: { rank_eval.details.amsterdam_query.metric_details: {"relevant_docs_retrieved": 2, "docs_retrieved": 2}}
+  - match: { quality_level: 1}
+  - match: { details.amsterdam_query.quality_level: 1.0}
+  - match: { details.amsterdam_query.unknown_docs: [ {"_index": "foo", "_id": "doc4"}]}
+  - match: { details.amsterdam_query.metric_details: {"relevant_docs_retrieved": 2, "docs_retrieved": 2}}

-  - length: { rank_eval.details.amsterdam_query.hits: 3}
-  - match: { rank_eval.details.amsterdam_query.hits.0.hit._id: "doc2"}
-  - match: { rank_eval.details.amsterdam_query.hits.0.rating: 1}
-  - match: { rank_eval.details.amsterdam_query.hits.1.hit._id: "doc3"}
-  - match: { rank_eval.details.amsterdam_query.hits.1.rating: 1}
-  - match: { rank_eval.details.amsterdam_query.hits.2.hit._id: "doc4"}
-  - is_false: rank_eval.details.amsterdam_query.hits.2.rating
+  - length: { details.amsterdam_query.hits: 3}
+  - match: { details.amsterdam_query.hits.0.hit._id: "doc2"}
+  - match: { details.amsterdam_query.hits.0.rating: 1}
+  - match: { details.amsterdam_query.hits.1.hit._id: "doc3"}
+  - match: { details.amsterdam_query.hits.1.rating: 1}
+  - match: { details.amsterdam_query.hits.2.hit._id: "doc4"}
+  - is_false: details.amsterdam_query.hits.2.rating

-  - match: { rank_eval.details.berlin_query.quality_level: 1.0}
-  - match: { rank_eval.details.berlin_query.unknown_docs: [ {"_index": "foo", "_id": "doc4"}]}
-  - match: { rank_eval.details.berlin_query.metric_details: {"relevant_docs_retrieved": 1, "docs_retrieved": 1}}
-  - length: { rank_eval.details.berlin_query.hits: 2}
-  - match: { rank_eval.details.berlin_query.hits.0.hit._id: "doc1" }
-  - match: { rank_eval.details.berlin_query.hits.0.rating: 1}
-  - match: { rank_eval.details.berlin_query.hits.1.hit._id: "doc4" }
-  - is_false: rank_eval.details.berlin_query.hits.1.rating
+  - match: { details.berlin_query.quality_level: 1.0}
+  - match: { details.berlin_query.unknown_docs: [ {"_index": "foo", "_id": "doc4"}]}
+  - match: { details.berlin_query.metric_details: {"relevant_docs_retrieved": 1, "docs_retrieved": 1}}
+  - length: { details.berlin_query.hits: 2}
+  - match: { details.berlin_query.hits.0.hit._id: "doc1" }
+  - match: { details.berlin_query.hits.0.rating: 1}
+  - match: { details.berlin_query.hits.1.hit._id: "doc4" }
+  - is_false: details.berlin_query.hits.1.rating

 ---
 "Mean Reciprocal Rank":
@@ -152,14 +152,14 @@
     }

   # average is (1/3 + 1/2)/2 = 5/12 ~ 0.41666666666666663
-  - gt: {rank_eval.quality_level: 0.416}
-  - lt: {rank_eval.quality_level: 0.417}
-  - gt: {rank_eval.details.amsterdam_query.quality_level: 0.333}
-  - lt: {rank_eval.details.amsterdam_query.quality_level: 0.334}
-  - match: {rank_eval.details.amsterdam_query.metric_details: {"first_relevant": 3}}
-  - match: {rank_eval.details.amsterdam_query.unknown_docs: [ {"_index": "foo", "_id": "doc2"},
+  - gt: {quality_level: 0.416}
+  - lt: {quality_level: 0.417}
+  - gt: {details.amsterdam_query.quality_level: 0.333}
+  - lt: {details.amsterdam_query.quality_level: 0.334}
+  - match: {details.amsterdam_query.metric_details: {"first_relevant": 3}}
+  - match: {details.amsterdam_query.unknown_docs: [ {"_index": "foo", "_id": "doc2"},
                                                    {"_index": "foo", "_id": "doc3"} ]}
-  - match: {rank_eval.details.berlin_query.quality_level: 0.5}
-  - match: {rank_eval.details.berlin_query.metric_details: {"first_relevant": 2}}
-  - match: {rank_eval.details.berlin_query.unknown_docs: [ {"_index": "foo", "_id": "doc1"}]}
+  - match: {details.berlin_query.quality_level: 0.5}
+  - match: {details.berlin_query.metric_details: {"first_relevant": 2}}
+  - match: {details.berlin_query.unknown_docs: [ {"_index": "foo", "_id": "doc1"}]}
diff --git a/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/20_dcg.yml b/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/20_dcg.yml
index 0aca6fdde9eae..fc5e6576ad4d1 100644
--- a/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/20_dcg.yml
+++ b/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/20_dcg.yml
@@ -2,8 +2,8 @@
 "Response format":

   - skip:
-      version: " - 6.99.99"
-      reason: the ranking evaluation feature is only available on 7.0
+      version: " - 6.1.99"
+      reason: the ranking evaluation feature is available since 6.2

   - do:
       index:
@@ -69,11 +69,11 @@
             "metric" : { "dcg": {}}
         }

-  - gt: {rank_eval.quality_level: 13.848263 }
-  - lt: {rank_eval.quality_level: 13.848264 }
-  - gt: {rank_eval.details.dcg_query.quality_level: 13.848263}
-  - lt: {rank_eval.details.dcg_query.quality_level: 13.848264}
-  - match: {rank_eval.details.dcg_query.unknown_docs: [ ]}
+  - gt: {quality_level: 13.848263 }
+  - lt: {quality_level: 13.848264 }
+  - gt: {details.dcg_query.quality_level: 13.848263}
+  - lt: {details.dcg_query.quality_level: 13.848264}
+  - match: {details.dcg_query.unknown_docs: [ ]}

 # reverse the order in which the results are returned (less relevant docs first)

@@ -96,11 +96,11 @@
             "metric" : { "dcg": { }}
         }

-  - gt: {rank_eval.quality_level: 10.299674}
-  - lt: {rank_eval.quality_level: 10.299675}
-  - gt: {rank_eval.details.dcg_query_reverse.quality_level: 10.299674}
-  - lt: {rank_eval.details.dcg_query_reverse.quality_level: 10.299675}
-  - match: {rank_eval.details.dcg_query_reverse.unknown_docs: [ ]}
+  - gt: {quality_level: 10.299674}
+  - lt: {quality_level: 10.299675}
+  - gt: {details.dcg_query_reverse.quality_level: 10.299674}
+  - lt: {details.dcg_query_reverse.quality_level: 10.299675}
+  - match: {details.dcg_query_reverse.unknown_docs: [ ]}

 # if we mix both, we should get the average

@@ -134,11 +134,11 @@
             "metric" : { "dcg": { }}
         }

-  - gt: {rank_eval.quality_level: 12.073969}
-  - lt: {rank_eval.quality_level: 12.073970}
-  - gt: {rank_eval.details.dcg_query.quality_level: 13.848263}
-  - lt: {rank_eval.details.dcg_query.quality_level: 13.848264}
-  - match: {rank_eval.details.dcg_query.unknown_docs: [ ]}
-  - gt: {rank_eval.details.dcg_query_reverse.quality_level: 10.299674}
-  - lt: {rank_eval.details.dcg_query_reverse.quality_level: 10.299675}
-  - match: {rank_eval.details.dcg_query_reverse.unknown_docs: [ ]}
+  - gt: {quality_level: 12.073969}
+  - lt: {quality_level: 12.073970}
+  - gt: {details.dcg_query.quality_level: 13.848263}
+  - lt: {details.dcg_query.quality_level: 13.848264}
+  - match: {details.dcg_query.unknown_docs: [ ]}
+  - gt: {details.dcg_query_reverse.quality_level: 10.299674}
+  - lt: {details.dcg_query_reverse.quality_level: 10.299675}
+  - match: {details.dcg_query_reverse.unknown_docs: [ ]}
diff --git a/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/30_failures.yml b/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/30_failures.yml
index 55efcdd104a30..24902253eb0d0 100644
--- a/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/30_failures.yml
+++ b/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/30_failures.yml
@@ -2,8 +2,8 @@
 "Response format":

   - skip:
-      version: " - 6.99.99"
-      reason: the ranking evaluation feature is only available on 7.0
+      version: " - 6.1.99"
+      reason: the ranking evaluation feature is available since 6.2

   - do:
       index:
@@ -34,9 +34,9 @@
             "metric" : { "precision": { "ignore_unlabeled" : true }}
         }

-  - match: { rank_eval.quality_level: 1}
-  - match: { rank_eval.details.amsterdam_query.quality_level: 1.0}
-  - match: { rank_eval.details.amsterdam_query.unknown_docs: [ ]}
-  - match: { rank_eval.details.amsterdam_query.metric_details: {"relevant_docs_retrieved": 1, "docs_retrieved": 1}}
+  - match: { quality_level: 1}
+  - match: { details.amsterdam_query.quality_level: 1.0}
+  - match: { details.amsterdam_query.unknown_docs: [ ]}
+  - match: { details.amsterdam_query.metric_details: {"relevant_docs_retrieved": 1, "docs_retrieved": 1}}

-  - is_true: rank_eval.failures.invalid_query
+  - is_true: failures.invalid_query
diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle
index f29daf799122d..479fe78cc8071 100644
--- a/modules/reindex/build.gradle
+++ b/modules/reindex/build.gradle
@@ -52,13 +52,6 @@ dependencies {
   testCompile project(path: ':modules:parent-join', configuration: 'runtime')
 }

-dependencyLicenses {
-  // Don't check the client's license. We know it.
-  dependencies = project.configurations.runtime.fileCollection {
-    it.group.startsWith('org.elasticsearch') == false
-  } - project.configurations.provided
-}
-
 thirdPartyAudit.excludes = [
   // Commons logging
   'javax.servlet.ServletContextEvent',
diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java
index 3ad48d803a437..f21fb45ed7a64 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java
@@ -91,6 +91,7 @@ private void testCancel(String action, AbstractBulkByScrollRequestBuilder
        int numDocs = getNumShards(INDEX).numPrimaries * 10 * builder.request().getSlices();
        ALLOWED_OPERATIONS.release(numDocs);
+        logger.debug("setting up [{}] docs", numDocs);
        indexRandom(true, false, true, IntStream.range(0, numDocs)
                .mapToObj(i -> client().prepareIndex(INDEX, TYPE, String.valueOf(i)).setSource("n", i))
                .collect(Collectors.toList()));
@@ -102,16 +103,21 @@ private void testCancel(String action, AbstractBulkByScrollRequestBuilder
        // Scroll by 1 so that cancellation is easier to control
        builder.source().setSize(1);

-        /* Allow a random number of the documents less the number of workers to be modified by the reindex action. That way at least one
-         * worker is blocked. */
+        /* Allow a random number of the documents less the number of workers
+         * to be modified by the reindex action. That way at least one worker
+         * is blocked. */
        int numModifiedDocs = randomIntBetween(builder.request().getSlices() * 2, numDocs);
+        logger.debug("chose to modify [{}] docs", numModifiedDocs);
        ALLOWED_OPERATIONS.release(numModifiedDocs - builder.request().getSlices());

        // Now execute the reindex action...
        ActionFuture future = builder.execute();

-        /* ... and waits for the indexing operation listeners to block. It is important to realize that some of the workers might have
-         * exhausted their slice while others might have quite a bit left to work on. We can't control that. */
+        /* ... and wait for the indexing operation listeners to block. It
+         * is important to realize that some of the workers might have
+         * exhausted their slice while others might have quite a bit left
+         * to work on. We can't control that. */
+        logger.debug("waiting for updates to be blocked");
        awaitBusy(() -> ALLOWED_OPERATIONS.hasQueuedThreads() && ALLOWED_OPERATIONS.availablePermits() == 0);

        // Status should show the task running
@@ -128,15 +134,19 @@ private void testCancel(String action, AbstractBulkByScrollRequestBuilder
        cancelTasksResponse.rethrowFailures("Cancel");
        assertThat(cancelTasksResponse.getTasks(), hasSize(1));

-        // The status should now show canceled. The request will still be in the list because it is (or its children are) still blocked.
+        /* The status should now show canceled. The request will still be in the
+         * list because it is (or its children are) still blocked. */
        mainTask = client().admin().cluster().prepareGetTask(mainTask.getTaskId()).get().getTask().getTask();
        status = (BulkByScrollTask.Status) mainTask.getStatus();
+        logger.debug("asserting that parent is marked canceled {}", status);
        assertEquals(CancelTasksRequest.DEFAULT_REASON, status.getReasonCancelled());
+
        if (builder.request().getSlices() > 1) {
            boolean foundCancelled = false;
            ListTasksResponse sliceList = client().admin().cluster().prepareListTasks().setParentTaskId(mainTask.getTaskId())
                .setDetailed(true).get();
            sliceList.rethrowFailures("Fetch slice tasks");
+            logger.debug("finding at least one canceled child among {}", sliceList.getTasks());
            for (TaskInfo slice: sliceList.getTasks()) {
                BulkByScrollTask.Status sliceStatus = (BulkByScrollTask.Status) slice.getStatus();
                if (sliceStatus.getReasonCancelled() == null) continue;
@@ -146,7 +156,7 @@ private void testCancel(String action, AbstractBulkByScrollRequestBuilder
            assertTrue("Didn't find at least one sub task that was cancelled", foundCancelled);
        }

-        // Unblock the last operations
+        logger.debug("unblocking the blocked update");
        ALLOWED_OPERATIONS.release(builder.request().getSlices());

        // Checks that no more operations are executed
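
For readers following the CancelTests changes: the test meters worker progress through the ALLOWED_OPERATIONS semaphore, so the reindex workers block exactly where the test wants them before cancellation. A minimal, self-contained sketch of that blocking pattern in plain JDK code (the class and variable names here are illustrative, not the test's):

    import java.util.concurrent.Semaphore;

    public class PermitGateDemo {
        // Zero permits up front: the worker cannot proceed until the "test" releases some.
        static final Semaphore ALLOWED_OPS = new Semaphore(0);

        public static void main(String[] args) throws InterruptedException {
            Thread worker = new Thread(() -> {
                for (int i = 0; i < 3; i++) {
                    ALLOWED_OPS.acquireUninterruptibly(); // one permit per simulated operation
                    System.out.println("operation " + i + " ran");
                }
            });
            worker.start();

            ALLOWED_OPS.release(2); // let two operations through; the third blocks
            while (ALLOWED_OPS.hasQueuedThreads() == false || ALLOWED_OPS.availablePermits() != 0) {
                Thread.sleep(10);   // plays the role of awaitBusy(...) in the real test
            }
            System.out.println("worker is blocked; cancel and assert here");
            ALLOWED_OPS.release(1); // unblock the final operation
            worker.join();
        }
    }
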
diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java
index f101b12538289..f5e234f66ca57 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java
@@ -107,6 +107,14 @@ public void testResponseOnSearchFailure() throws Exception {
                response.get();
                logger.info("Didn't trigger a reindex failure on the {} attempt", attempt);
                attempt++;
+                /*
+                 * In the past we've seen the delete of the source index
+                 * actually take effect *during* the `indexDocs` call in
+                 * the next step. This breaks things pretty disastrously
+                 * so we *try* to wait for the delete to be fully
+                 * complete here.
+                 */
+                assertBusy(() -> assertFalse(client().admin().indices().prepareExists("source").get().isExists()));
            } catch (ExecutionException e) {
                logger.info("Triggered a reindex failure on the {} attempt: {}", attempt, e.getMessage());
                assertThat(e.getMessage(),
diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java
index f927f920f9097..a4502a953dbe0 100644
--- a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java
+++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java
@@ -99,8 +99,8 @@ public boolean equals(Object o) {
        }

        @Override
-        public void checkCompatibility(MappedFieldType otherFT, List conflicts, boolean strict) {
-            super.checkCompatibility(otherFT, conflicts, strict);
+        public void checkCompatibility(MappedFieldType otherFT, List conflicts) {
+            super.checkCompatibility(otherFT, conflicts);
            CollationFieldType other = (CollationFieldType) otherFT;
            if (!Objects.equals(collator, other.collator)) {
                conflicts.add("mapper [" + name() + "] has different [collator]");
@@ -619,8 +619,8 @@ protected String contentType() {
    }

    @Override
-    protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
-        super.doMerge(mergeWith, updateAllTypes);
+    protected void doMerge(Mapper mergeWith) {
+        super.doMerge(mergeWith);
        List conflicts = new ArrayList<>();
        ICUCollationKeywordFieldMapper icuMergeWith = (ICUCollationKeywordFieldMapper) mergeWith;
diff --git a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperTests.java b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperTests.java
index 060a94a9d27b4..88f92d0aad8ba 100644
--- a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperTests.java
+++ b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperTests.java
@@ -434,7 +434,7 @@ public void testUpdateCollator() throws IOException {
                .field("language", "tr")
                .field("strength", "primary")
                .endObject().endObject().endObject().endObject().string();
-        indexService.mapperService().merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, randomBoolean());
+        indexService.mapperService().merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE);

        String mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type")
                .startObject("properties").startObject("field")
@@ -443,7 +443,7 @@ public void testUpdateCollator() throws IOException {
                .endObject().endObject().endObject().endObject().string();
        IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> indexService.mapperService().merge("type",
-                new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, randomBoolean()));
+                new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE));
        assertEquals("Can't merge because of conflicts: [Cannot update language setting for [" + FIELD_TYPE
                + "], Cannot update strength setting for [" + FIELD_TYPE + "]]", e.getMessage());
    }
diff --git a/plugins/discovery-azure-classic/build.gradle b/plugins/discovery-azure-classic/build.gradle
index e5ba37d9cb0d6..6f177f7b7f5b2 100644
--- a/plugins/discovery-azure-classic/build.gradle
+++ b/plugins/discovery-azure-classic/build.gradle
@@ -67,7 +67,7 @@ task createKey(type: LoggedExec) {
    project.delete(keystore.parentFile)
    keystore.parentFile.mkdirs()
  }
-  executable = new File(project.javaHome, 'bin/keytool')
+  executable = new File(project.runtimeJavaHome, 'bin/keytool')
  standardInput = new ByteArrayInputStream('FirstName LastName\nUnit\nOrganization\nCity\nState\nNL\nyes\n\n'.getBytes('UTF-8'))
  args '-genkey',
       '-alias', 'test-node',
diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle
index 88f25f72e72f9..7daf944f81898 100644
--- a/plugins/discovery-ec2/build.gradle
+++ b/plugins/discovery-ec2/build.gradle
@@ -83,5 +83,8 @@ thirdPartyAudit.excludes = [
 ]

 if (JavaVersion.current() > JavaVersion.VERSION_1_8) {
-  thirdPartyAudit.excludes += ['javax.xml.bind.DatatypeConverter']
+  thirdPartyAudit.excludes += [
+    'javax.xml.bind.DatatypeConverter',
+    'javax.xml.bind.JAXBContext'
+  ]
 }
diff --git a/plugins/discovery-gce/build.gradle b/plugins/discovery-gce/build.gradle
index fa6f33a633cd4..82de9ba031b25 100644
--- a/plugins/discovery-gce/build.gradle
+++ b/plugins/discovery-gce/build.gradle
@@ -35,7 +35,7 @@ task createKey(type: LoggedExec) {
    project.delete(keystore.parentFile)
    keystore.parentFile.mkdirs()
  }
-  executable = new File(project.javaHome, 'bin/keytool')
+  executable = new File(project.runtimeJavaHome, 'bin/keytool')
  standardInput = new ByteArrayInputStream('FirstName LastName\nUnit\nOrganization\nCity\nState\nNL\nyes\n\n'.getBytes('UTF-8'))
  args '-genkey',
       '-alias', 'test-node',
diff --git a/plugins/examples/meta-plugin/build.gradle b/plugins/examples/meta-plugin/build.gradle
index 3674837b0b2f9..db28e6378713e 100644
--- a/plugins/examples/meta-plugin/build.gradle
+++ b/plugins/examples/meta-plugin/build.gradle
@@ -18,39 +18,11 @@
  */

 // A meta plugin packaging example that bundles multiple plugins in a single zip.
-apply plugin: 'elasticsearch.standalone-rest-test'
-apply plugin: 'elasticsearch.rest-test'

-File plugins = new File(buildDir, 'plugins-unzip')
-subprojects {
-  // unzip the subproject plugins
-  task unzip(type:Copy, dependsOn: "${project.path}:bundlePlugin") {
-    File dest = new File(plugins, project.name)
-    from { zipTree(project(project.path).bundlePlugin.outputs.files.singleFile) }
-    eachFile { f -> f.path = f.path.replaceFirst('elasticsearch', '') }
-    into dest
-  }
-}
-
-// Build the meta plugin zip from the subproject plugins (unzipped)
-task buildZip(type:Zip) {
-  subprojects.each { dependsOn("${it.name}:unzip") }
-  from plugins
-  from 'src/main/resources/meta-plugin-descriptor.properties'
-  into 'elasticsearch'
-  includeEmptyDirs false
-}
-
-integTestCluster {
-  dependsOn buildZip
-
-  // This is important, so that all the modules are available too.
-  // There are index templates that use token filters that are in analysis-module and
-  // processors are being used that are in ingest-common module.
-  distribution = 'zip'
+apply plugin: 'elasticsearch.es-meta-plugin'

-  // Install the meta plugin before start.
-  setupCommand 'installMetaPlugin',
-          'bin/elasticsearch-plugin', 'install', 'file:' + buildZip.archivePath
+es_meta_plugin {
+  name 'meta-plugin'
+  description 'example meta plugin'
+  plugins = ['dummy-plugin1', 'dummy-plugin2']
 }
-check.dependsOn integTest
diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle
index a57d8f880bcfc..3bca078bd59c4 100644
--- a/plugins/ingest-attachment/build.gradle
+++ b/plugins/ingest-attachment/build.gradle
@@ -2104,8 +2104,6 @@ if (JavaVersion.current() > JavaVersion.VERSION_1_8) {
    'javax.xml.bind.DatatypeConverter',
    'javax.xml.bind.JAXBContext',
    'javax.xml.bind.JAXBElement',
-    'javax.xml.bind.Marshaller',
-    'javax.xml.bind.Unmarshaller',
-    'javax.xml.bind.helpers.DefaultValidationEventHandler',
+    'javax.xml.bind.Unmarshaller'
  ]
 }
diff --git a/plugins/jvm-example/build.gradle b/plugins/jvm-example/build.gradle
index 78e54d8bc817e..7a229a396f7d2 100644
--- a/plugins/jvm-example/build.gradle
+++ b/plugins/jvm-example/build.gradle
@@ -38,7 +38,7 @@ dependencies {

 task exampleFixture(type: org.elasticsearch.gradle.test.AntFixture) {
  dependsOn project.configurations.exampleFixture
-  executable = new File(project.javaHome, 'bin/java')
+  executable = new File(project.runtimeJavaHome, 'bin/java')
  args '-cp', "${ -> project.configurations.exampleFixture.asPath }",
      'example.ExampleTestFixture',
      baseDir
diff --git a/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java b/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java
index b2974f139fb35..d3830ab210662 100644
--- a/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java
+++ b/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java
@@ -183,7 +183,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
    }

    @Override
-    protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
+    protected void doMerge(Mapper mergeWith) {
        SizeFieldMapper sizeFieldMapperMergeWith = (SizeFieldMapper) mergeWith;
        if (sizeFieldMapperMergeWith.enabledState != enabledState && !sizeFieldMapperMergeWith.enabledState.unset()) {
            this.enabledState = sizeFieldMapperMergeWith.enabledState;
diff --git a/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java b/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java
index 2cde1b1bd07d2..c433f0d256a97 100644
--- a/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java
+++ b/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java
@@ -110,7 +110,7 @@ public void testThatDisablingWorksWhenMerging() throws Exception {
                .startObject("_size").field("enabled", false).endObject()
                .endObject().endObject().string();
        docMapper = service.mapperService().merge("type", new CompressedXContent(disabledMapping),
-            MapperService.MergeReason.MAPPING_UPDATE, false);
+            MapperService.MergeReason.MAPPING_UPDATE);
        assertThat(docMapper.metadataMapper(SizeFieldMapper.class).enabled(), is(false));
    }
diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle
index 19ca4c0148256..631157a7e175b 100644
--- a/plugins/repository-hdfs/build.gradle
+++ b/plugins/repository-hdfs/build.gradle
@@ -114,7 +114,7 @@ for (String principal : principals) {

 for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture', 'secureHaHdfsFixture']) {
  project.tasks.create(fixtureName, org.elasticsearch.gradle.test.AntFixture) {
    dependsOn project.configurations.hdfsFixture
-    executable = new File(project.javaHome, 'bin/java')
+    executable = new File(project.runtimeJavaHome, 'bin/java')
    env 'CLASSPATH', "${ -> project.configurations.hdfsFixture.asPath }"

    final List miniHDFSArgs = []
@@ -124,7 +124,7 @@ for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture',
      dependsOn krb5kdcFixture, krb5AddPrincipals
      Path krb5Config = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("conf").resolve("krb5.conf")
      miniHDFSArgs.add("-Djava.security.krb5.conf=${krb5Config}");
-      if (project.rootProject.ext.javaVersion == JavaVersion.VERSION_1_9) {
+      if (project.runtimeJavaVersion == JavaVersion.VERSION_1_9) {
        miniHDFSArgs.add('--add-opens=java.security.jgss/sun.security.krb5=ALL-UNNAMED')
      }
    }
@@ -170,7 +170,7 @@ project.afterEvaluate {
    restIntegTestTask.clusterConfig.extraConfigFile("repository-hdfs/krb5.keytab", "${elasticsearchKT}")
    jvmArgs = jvmArgs + " " + "-Djava.security.krb5.conf=${krb5conf}"
-    if (project.rootProject.ext.javaVersion == JavaVersion.VERSION_1_9) {
+    if (project.runtimeJavaVersion == JavaVersion.VERSION_1_9) {
      jvmArgs = jvmArgs + " " + '--add-opens=java.security.jgss/sun.security.krb5=ALL-UNNAMED'
    }

@@ -181,7 +181,7 @@ project.afterEvaluate {
    restIntegTestTaskRunner.systemProperty "test.krb5.principal.es", "elasticsearch@${realm}"
    restIntegTestTaskRunner.systemProperty "test.krb5.principal.hdfs", "hdfs/hdfs.build.elastic.co@${realm}"
    restIntegTestTaskRunner.jvmArg "-Djava.security.krb5.conf=${krb5conf}"
-    if (project.rootProject.ext.javaVersion == JavaVersion.VERSION_1_9) {
+    if (project.runtimeJavaVersion == JavaVersion.VERSION_1_9) {
      restIntegTestTaskRunner.jvmArg '--add-opens=java.security.jgss/sun.security.krb5=ALL-UNNAMED'
    }

diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java
index 9917bf79f593b..acea1ca5d482e 100644
--- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java
+++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java
@@ -31,15 +31,15 @@ import org.elasticsearch.common.util.PageCacheRecycler;
 import org.elasticsearch.common.util.concurrent.EsExecutors;
 import org.elasticsearch.indices.breaker.CircuitBreakerService;
+import org.elasticsearch.nio.SocketChannelContext;
 import org.elasticsearch.nio.AcceptingSelector;
 import org.elasticsearch.nio.AcceptorEventHandler;
-import org.elasticsearch.nio.BytesReadContext;
-import org.elasticsearch.nio.BytesWriteContext;
 import org.elasticsearch.nio.ChannelFactory;
 import org.elasticsearch.nio.InboundChannelBuffer;
+import org.elasticsearch.nio.BytesChannelContext;
 import org.elasticsearch.nio.NioGroup;
 import org.elasticsearch.nio.NioSocketChannel;
-import org.elasticsearch.nio.ReadContext;
+import org.elasticsearch.nio.ServerChannelContext;
 import org.elasticsearch.nio.SocketEventHandler;
 import org.elasticsearch.nio.SocketSelector;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -53,6 +53,7 @@ import java.nio.channels.ServerSocketChannel;
 import java.nio.channels.SocketChannel;
 import java.util.concurrent.ConcurrentMap;
+import java.util.function.BiConsumer;
 import java.util.function.Supplier;

 import static org.elasticsearch.common.settings.Setting.intSetting;
@@ -72,12 +73,12 @@ public class NioTransport extends TcpTransport {
    public static final Setting NIO_ACCEPTOR_COUNT =
        intSetting("transport.nio.acceptor_count", 1, 1, Setting.Property.NodeScope);

-    private final PageCacheRecycler pageCacheRecycler;
+    protected final PageCacheRecycler pageCacheRecycler;

    private final ConcurrentMap profileToChannelFactory = newConcurrentMap();
    private volatile NioGroup nioGroup;
    private volatile TcpChannelFactory clientChannelFactory;

-    NioTransport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays,
+    protected NioTransport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays,
                 PageCacheRecycler pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry,
                 CircuitBreakerService circuitBreakerService) {
        super("nio", settings, threadPool, bigArrays, circuitBreakerService, namedWriteableRegistry, networkService);
@@ -111,13 +112,13 @@ protected void doStart() {
            NioTransport.NIO_WORKER_COUNT.get(settings), SocketEventHandler::new);

        ProfileSettings clientProfileSettings = new ProfileSettings(settings, "default");
-        clientChannelFactory = new TcpChannelFactory(clientProfileSettings);
+        clientChannelFactory = channelFactory(clientProfileSettings, true);

        if (useNetworkServer) {
            // loop through all profiles and start them up, special handling for default one
            for (ProfileSettings profileSettings : profileSettings) {
                String profileName = profileSettings.profileName;
-                TcpChannelFactory factory = new TcpChannelFactory(profileSettings);
+                TcpChannelFactory factory = channelFactory(profileSettings, false);
                profileToChannelFactory.putIfAbsent(profileName, factory);
                bindServer(profileSettings);
            }
@@ -144,19 +145,30 @@ protected void stopInternal() {
        profileToChannelFactory.clear();
    }

-    private void exceptionCaught(NioSocketChannel channel, Exception exception) {
+    protected void exceptionCaught(NioSocketChannel channel, Exception exception) {
        onException((TcpChannel) channel, exception);
    }

-    private void acceptChannel(NioSocketChannel channel) {
+    protected void acceptChannel(NioSocketChannel channel) {
        serverAcceptedChannel((TcpNioSocketChannel) channel);
    }

-    private class TcpChannelFactory extends ChannelFactory {
+    protected TcpChannelFactory channelFactory(ProfileSettings settings, boolean isClient) {
+        return new TcpChannelFactoryImpl(settings);
+    }
+
+    protected abstract class TcpChannelFactory extends ChannelFactory {
+
+        protected TcpChannelFactory(RawChannelFactory rawChannelFactory) {
+            super(rawChannelFactory);
+        }
+    }
+
+    private class TcpChannelFactoryImpl extends TcpChannelFactory {

        private final String profileName;

-        TcpChannelFactory(TcpTransport.ProfileSettings profileSettings) {
+        private TcpChannelFactoryImpl(ProfileSettings profileSettings) {
            super(new RawChannelFactory(profileSettings.tcpNoDelay,
                profileSettings.tcpKeepAlive,
                profileSettings.reuseAddress,
@@ -172,18 +184,21 @@ public TcpNioSocketChannel createChannel(SocketSelector selector, SocketChannel
                Recycler.V bytes = pageCacheRecycler.bytePage(false);
                return new InboundChannelBuffer.Page(ByteBuffer.wrap(bytes.v()), bytes::close);
            };
-            ReadContext.ReadConsumer nioReadConsumer = channelBuffer ->
+            SocketChannelContext.ReadConsumer nioReadConsumer = channelBuffer ->
                consumeNetworkReads(nioChannel, BytesReference.fromByteBuffers(channelBuffer.sliceBuffersTo(channelBuffer.getIndex())));
-            BytesReadContext readContext = new BytesReadContext(nioChannel, nioReadConsumer, new InboundChannelBuffer(pageSupplier));
-            nioChannel.setContexts(readContext, new BytesWriteContext(nioChannel), NioTransport.this::exceptionCaught);
+            BiConsumer exceptionHandler = NioTransport.this::exceptionCaught;
+            BytesChannelContext context = new BytesChannelContext(nioChannel, exceptionHandler, nioReadConsumer,
+                new InboundChannelBuffer(pageSupplier));
+            nioChannel.setContext(context);
            return nioChannel;
        }

        @Override
        public TcpNioServerSocketChannel createServerChannel(AcceptingSelector selector, ServerSocketChannel channel) throws IOException {
-            TcpNioServerSocketChannel nioServerChannel = new TcpNioServerSocketChannel(profileName, channel, this, selector);
-            nioServerChannel.setAcceptContext(NioTransport.this::acceptChannel);
-            return nioServerChannel;
+            TcpNioServerSocketChannel nioChannel = new TcpNioServerSocketChannel(profileName, channel, this, selector);
+            ServerChannelContext context = new ServerChannelContext(nioChannel, NioTransport.this::acceptChannel, (c, e) -> {});
+            nioChannel.setContext(context);
+            return nioChannel;
        }
    }
 }
diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioServerSocketChannel.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioServerSocketChannel.java
index 7f657c763486d..683ae146cfb9c 100644
--- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioServerSocketChannel.java
+++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioServerSocketChannel.java
@@ -38,9 +38,9 @@ public class TcpNioServerSocketChannel extends NioServerSocketChannel implements

    private final String profile;

-    TcpNioServerSocketChannel(String profile, ServerSocketChannel socketChannel,
-                              ChannelFactory channelFactory,
-                              AcceptingSelector selector) throws IOException {
+    public TcpNioServerSocketChannel(String profile, ServerSocketChannel socketChannel,
+                                     ChannelFactory channelFactory,
+                                     AcceptingSelector selector) throws IOException {
        super(socketChannel, channelFactory, selector);
        this.profile = profile;
    }
@@ -60,6 +60,11 @@ public InetSocketAddress getRemoteAddress() {
        return null;
    }

+    @Override
+    public void close() {
+        getSelector().queueChannelClose(this);
+    }
+
    @Override
    public String getProfile() {
        return profile;
diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioSocketChannel.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioSocketChannel.java
index 5633899a04b9f..c2064e53ca64f 100644
--- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioSocketChannel.java
+++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioSocketChannel.java
@@ -33,13 +33,13 @@ public class TcpNioSocketChannel extends NioSocketChannel implements TcpChannel

    private final String profile;

-    TcpNioSocketChannel(String profile, SocketChannel socketChannel, SocketSelector selector) throws IOException {
+    public TcpNioSocketChannel(String profile, SocketChannel socketChannel, SocketSelector selector) throws IOException {
        super(socketChannel, selector);
        this.profile = profile;
    }

    public void sendMessage(BytesReference reference, ActionListener listener) {
-        getWriteContext().sendMessage(BytesReference.toByteBuffers(reference), ActionListener.toBiConsumer(listener));
+        getContext().sendMessage(BytesReference.toByteBuffers(reference), ActionListener.toBiConsumer(listener));
    }

    @Override
@@ -59,6 +59,11 @@ public void addCloseListener(ActionListener listener) {
        addCloseListener(ActionListener.toBiConsumer(listener));
    }

+    @Override
+    public void close() {
+        getContext().closeChannel();
+    }
+
    @Override
    public String toString() {
        return "TcpNioSocketChannel{" +
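
The three transport-nio files above all move in one direction: the separate read context, write context, and accept-context wiring collapse into a single per-channel context object (BytesChannelContext for sockets, ServerChannelContext for server channels) that also owns closing. Reduced to plain Java with hypothetical names — this sketches the shape of the design only, not Elasticsearch's actual API:

    // One context object per channel owns reading, writing and closing,
    // instead of separate ReadContext/WriteContext/accept wiring.
    interface ChannelContext {
        void sendMessage(byte[] message);
        void closeChannel();
    }

    final class Channel {
        private ChannelContext context; // attached once by the factory, used for everything

        void setContext(ChannelContext context) {
            this.context = context;
        }

        ChannelContext getContext() {
            return context;
        }

        // close() now simply delegates, mirroring TcpNioSocketChannel.close() above
        void close() {
            getContext().closeChannel();
        }
    }
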
["open","closed","none","all"], "default" : "open", "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both." - }, - "update_all_types": { - "type": "boolean", - "description": "Whether to update the mapping for all fields with the same name across all types or not" } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/13_fields.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/13_fields.yml index 7be97cda1fe0c..82655c5778d27 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/13_fields.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/13_fields.yml @@ -4,7 +4,14 @@ setup: - do: indices.create: index: test1 + wait_for_active_shards: all body: + settings: + # Limit the number of shards so that shards are unlikely + # to be relocated or being initialized between the test + # set up and the test execution + index.number_of_shards: 3 + index.number_of_replicas: 0 mappings: bar: properties: @@ -20,6 +27,11 @@ setup: fields: completion: type: completion + + - do: + cluster.health: + wait_for_no_relocating_shards: true + - do: index: index: test1 @@ -29,10 +41,10 @@ setup: - do: index: - index: test2 - type: baz - id: 1 - body: { "bar": "bar", "baz": "baz" } + index: test1 + type: bar + id: 2 + body: { "bar": "foo", "baz": "foo" } - do: indices.refresh: {} @@ -57,18 +69,17 @@ setup: completion: field: baz.completion - - do: - indices.refresh: {} - - do: search: - sort: bar,baz + body: + sort: [ "bar", "baz" ] --- "Fields - blank": - do: indices.stats: {} + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.memory_size_in_bytes: 0 } - is_false: _all.total.fielddata.fields - gt: { _all.total.completion.size_in_bytes: 0 } @@ -79,6 +90,7 @@ setup: - do: indices.stats: { fields: bar } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.memory_size_in_bytes: 0 } - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - is_false: _all.total.fielddata.fields.baz @@ -90,6 +102,7 @@ setup: - do: indices.stats: { fields: "bar,baz.completion" } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.memory_size_in_bytes: 0 } - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - is_false: _all.total.fielddata.fields.baz @@ -102,6 +115,7 @@ setup: - do: indices.stats: { fields: "*" } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.memory_size_in_bytes: 0 } - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - gt: { _all.total.fielddata.fields.baz.memory_size_in_bytes: 0 } @@ -114,6 +128,7 @@ setup: - do: indices.stats: { fields: "bar*" } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.memory_size_in_bytes: 0 } - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - is_false: _all.total.fielddata.fields.baz @@ -126,6 +141,7 @@ setup: - do: indices.stats: { fields: "bar*", metric: _all } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.memory_size_in_bytes: 0 } - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - is_false: _all.total.fielddata.fields.baz @@ -138,6 +154,7 @@ setup: - do: indices.stats: { fields: "bar*", metric: fielddata } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.memory_size_in_bytes: 0 } - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - is_false: _all.total.fielddata.fields.baz @@ -148,6 +165,7 @@ setup: - do: indices.stats: { fields: "bar*", metric: completion } + - match: { 
_shards.failed: 0} - is_false: _all.total.fielddata - gt: { _all.total.completion.size_in_bytes: 0 } - gt: { _all.total.completion.fields.bar\.completion.size_in_bytes: 0 } @@ -158,6 +176,7 @@ setup: - do: indices.stats: { fields: "bar*" , metric: [ completion, fielddata, search ]} + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.memory_size_in_bytes: 0 } - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - is_false: _all.total.fielddata.fields.baz @@ -170,6 +189,7 @@ setup: - do: indices.stats: { fielddata_fields: bar } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - is_false: _all.total.fielddata.fields.baz - is_false: _all.total.completion.fields @@ -179,6 +199,7 @@ setup: - do: indices.stats: { fielddata_fields: "bar,baz,baz.completion" } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - gt: { _all.total.fielddata.fields.baz.memory_size_in_bytes: 0 } - is_false: _all.total.completion.fields @@ -188,6 +209,7 @@ setup: - do: indices.stats: { fielddata_fields: "*" } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - gt: { _all.total.fielddata.fields.baz.memory_size_in_bytes: 0 } - is_false: _all.total.completion.fields @@ -197,6 +219,7 @@ setup: - do: indices.stats: { fielddata_fields: "*r" } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - is_false: _all.total.fielddata.fields.baz - is_false: _all.total.completion.fields @@ -207,6 +230,7 @@ setup: - do: indices.stats: { fielddata_fields: "*r", metric: _all } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - is_false: _all.total.fielddata.fields.baz - is_false: _all.total.completion.fields @@ -216,6 +240,7 @@ setup: - do: indices.stats: { fielddata_fields: "*r", metric: fielddata } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - is_false: _all.total.fielddata.fields.baz - is_false: _all.total.completion.fields @@ -226,6 +251,7 @@ setup: - do: indices.stats: { fielddata_fields: "*r", metric: [ fielddata, search] } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - is_false: _all.total.fielddata.fields.baz - is_false: _all.total.completion.fields @@ -236,6 +262,7 @@ setup: - do: indices.stats: { completion_fields: bar.completion } + - match: { _shards.failed: 0} - gt: { _all.total.completion.fields.bar\.completion.size_in_bytes: 0 } - is_false: _all.total.completion.fields.baz\.completion - is_false: _all.total.fielddata.fields @@ -245,6 +272,7 @@ setup: - do: indices.stats: { completion_fields: "bar.completion,baz,baz.completion" } + - match: { _shards.failed: 0} - gt: { _all.total.completion.fields.bar\.completion.size_in_bytes: 0 } - gt: { _all.total.completion.fields.baz\.completion.size_in_bytes: 0 } - is_false: _all.total.fielddata.fields @@ -254,6 +282,7 @@ setup: - do: indices.stats: { completion_fields: "*" } + - match: { _shards.failed: 0} - gt: { _all.total.completion.fields.bar\.completion.size_in_bytes: 0 } - gt: { _all.total.completion.fields.baz\.completion.size_in_bytes: 0 } - is_false: _all.total.fielddata.fields @@ -263,6 +292,7 @@ setup: - do: indices.stats: { completion_fields: "*r*" } + - match: { _shards.failed: 0} - gt: { _all.total.completion.fields.bar\.completion.size_in_bytes: 0 } - is_false: _all.total.completion.fields.baz\.completion - 
is_false: _all.total.fielddata.fields @@ -272,6 +302,7 @@ setup: - do: indices.stats: { completion_fields: "*r*", metric: _all } + - match: { _shards.failed: 0} - gt: { _all.total.completion.fields.bar\.completion.size_in_bytes: 0 } - is_false: _all.total.completion.fields.baz\.completion - is_false: _all.total.fielddata.fields @@ -281,6 +312,7 @@ setup: - do: indices.stats: { completion_fields: "*r*", metric: completion } + - match: { _shards.failed: 0} - gt: { _all.total.completion.fields.bar\.completion.size_in_bytes: 0 } - is_false: _all.total.completion.fields.baz\.completion - is_false: _all.total.fielddata.fields @@ -290,6 +322,7 @@ setup: - do: indices.stats: { completion_fields: "*r*", metric: [ completion, search ] } + - match: { _shards.failed: 0} - gt: { _all.total.completion.fields.bar\.completion.size_in_bytes: 0 } - is_false: _all.total.completion.fields.baz\.completion - is_false: _all.total.fielddata.fields diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml index aaf277d171ba0..e094c47ff422b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml @@ -7,6 +7,8 @@ setup: mappings: doc: properties: + date: + type: date keyword: type: keyword long: @@ -40,6 +42,20 @@ setup: id: 4 body: { "keyword": "bar", "long": [1000, 0] } + - do: + index: + index: test + type: doc + id: 5 + body: { "date": "2017-10-20T03:08:45" } + + - do: + index: + index: test + type: doc + id: 6 + body: { "date": "2017-10-21T07:00:00" } + - do: indices.refresh: index: [test] @@ -66,7 +82,7 @@ setup: } ] - - match: {hits.total: 4} + - match: {hits.total: 6} - length: { aggregations.test.buckets: 2 } - match: { aggregations.test.buckets.0.key.kw: "bar" } - match: { aggregations.test.buckets.0.doc_count: 3 } @@ -104,7 +120,7 @@ setup: } ] - - match: {hits.total: 4} + - match: {hits.total: 6} - length: { aggregations.test.buckets: 5 } - match: { aggregations.test.buckets.0.key.long: 0} - match: { aggregations.test.buckets.0.key.kw: "bar" } @@ -154,7 +170,7 @@ setup: ] after: { "long": 20, "kw": "foo" } - - match: {hits.total: 4} + - match: {hits.total: 6} - length: { aggregations.test.buckets: 2 } - match: { aggregations.test.buckets.0.key.long: 100 } - match: { aggregations.test.buckets.0.key.kw: "bar" } @@ -188,7 +204,7 @@ setup: ] after: { "kw": "delta" } - - match: {hits.total: 4} + - match: {hits.total: 6} - length: { aggregations.test.buckets: 1 } - match: { aggregations.test.buckets.0.key.kw: "foo" } - match: { aggregations.test.buckets.0.doc_count: 2 } @@ -220,3 +236,62 @@ setup: } } ] + +--- +"Composite aggregation with format": + - skip: + version: " - 6.99.99" + reason: this uses a new option (format) added in 7.0.0 + + - do: + search: + index: test + body: + aggregations: + test: + composite: + sources: [ + { + "date": { + "date_histogram": { + "field": "date", + "interval": "1d", + "format": "yyyy-MM-dd" + } + } + } + ] + + - match: {hits.total: 6} + - length: { aggregations.test.buckets: 2 } + - match: { aggregations.test.buckets.0.key.date: "2017-10-20" } + - match: { aggregations.test.buckets.0.doc_count: 1 } + - match: { aggregations.test.buckets.1.key.date: "2017-10-21" } + - match: { aggregations.test.buckets.1.doc_count: 1 } + + - do: + search: + index: test + body: + aggregations: + test: + composite: + after: 
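
The new "Composite aggregation with format" test above drives the REST-level option; on the Java side the equivalent request would be built with the composite values-source builders. This is a hedged sketch — DateHistogramValuesSourceBuilder and its format(...) setter are assumed from the 7.0.0-era composite API that these tests exercise, and are not shown in the patch itself:

    import java.util.Collections;
    import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder;
    import org.elasticsearch.search.aggregations.bucket.composite.DateHistogramValuesSourceBuilder;
    import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;

    // Bucket "date" per day and render bucket keys as "2017-10-20" instead of epoch millis.
    DateHistogramValuesSourceBuilder byDay = new DateHistogramValuesSourceBuilder("date")
            .field("date")
            .dateHistogramInterval(DateHistogramInterval.DAY)
            .format("yyyy-MM-dd"); // the new option under test
    CompositeAggregationBuilder composite =
            new CompositeAggregationBuilder("test", Collections.singletonList(byDay));
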
{ + date: "2017-10-20" + } + sources: [ + { + "date": { + "date_histogram": { + "field": "date", + "interval": "1d", + "format": "yyyy-MM-dd" + } + } + } + ] + + - match: {hits.total: 6} + - length: { aggregations.test.buckets: 1 } + - match: { aggregations.test.buckets.0.key.date: "2017-10-21" } + - match: { aggregations.test.buckets.0.doc_count: 1 } diff --git a/server/build.gradle b/server/build.gradle index 4f69c2ee159b5..c11c88dfc6e82 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -36,6 +36,29 @@ publishing { archivesBaseName = 'elasticsearch' +// we want to keep the JDKs in our IDEs set to JDK 8 until minimum JDK is bumped to 9 so we do not include this source set in our IDEs +if (!isEclipse && !isIdea) { + sourceSets { + java9 { + java { + srcDirs = ['src/main/java9'] + } + } + } + + compileJava9Java { + sourceCompatibility = 9 + targetCompatibility = 9 + } + + jar { + into('META-INF/versions/9') { + from sourceSets.java9.output + } + manifest.attributes('Multi-Release': 'true') + } +} + dependencies { compile "org.elasticsearch:elasticsearch-core:${version}" @@ -67,7 +90,7 @@ dependencies { compile 'com.carrotsearch:hppc:0.7.1' // time handling, remove with java 8 time - compile 'joda-time:joda-time:2.9.5' + compile 'joda-time:joda-time:2.9.9' // json and yaml compile "org.yaml:snakeyaml:${versions.snakeyaml}" @@ -94,6 +117,10 @@ dependencies { // repackaged jna with native bits linked against all elastic supported platforms compile "org.elasticsearch:jna:${versions.jna}" + if (!isEclipse && !isIdea) { + java9Compile sourceSets.main.output + } + if (isEclipse == false || project.path == ":server-tests") { testCompile("org.elasticsearch.test:framework:${version}") { // tests use the locally compiled version of server diff --git a/server/licenses/joda-time-2.9.5.jar.sha1 b/server/licenses/joda-time-2.9.5.jar.sha1 deleted file mode 100644 index ecf1c781556ee..0000000000000 --- a/server/licenses/joda-time-2.9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5f01da7306363fad2028b916f3eab926262de928 \ No newline at end of file diff --git a/server/licenses/joda-time-2.9.9.jar.sha1 b/server/licenses/joda-time-2.9.9.jar.sha1 new file mode 100644 index 0000000000000..4009932ea3beb --- /dev/null +++ b/server/licenses/joda-time-2.9.9.jar.sha1 @@ -0,0 +1 @@ +f7b520c458572890807d143670c9b24f4de90897 \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 8a4bc0752be3f..b741c34fab98a 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -109,6 +109,8 @@ public class Version implements Comparable { public static final Version V_5_6_5 = new Version(V_5_6_5_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); public static final int V_5_6_6_ID = 5060699; public static final Version V_5_6_6 = new Version(V_5_6_6_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); + public static final int V_5_6_7_ID = 5060799; + public static final Version V_5_6_7 = new Version(V_5_6_7_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); public static final int V_6_0_0_alpha1_ID = 6000001; public static final Version V_6_0_0_alpha1 = new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_0_0); @@ -142,8 +144,12 @@ public class Version implements Comparable { public static final Version V_6_1_1 = new Version(V_6_1_1_ID, org.apache.lucene.util.Version.LUCENE_7_1_0); public static final int V_6_1_2_ID = 6010299; public static final Version 
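// Editor's note (illustration, not part of the patch): the Version constants
// added in this hunk follow the packed-decimal ID scheme visible above, e.g.
// V_6_1_3_ID = 6010399 and V_5_6_7_ID = 5060799, where releases end in 99 and
// alphas count up from 01 (V_7_0_0_alpha1_ID = 7000001). A minimal sketch of
// that packing; VersionIdSketch and id(...) are hypothetical names.
final class VersionIdSketch {
    static int id(int major, int minor, int revision, int build) {
        return major * 1_000_000 + minor * 10_000 + revision * 100 + build;
    }

    public static void main(String[] args) {
        System.out.println(id(6, 1, 3, 99)); // 6010399, matches V_6_1_3_ID
        System.out.println(id(5, 6, 7, 99)); // 5060799, matches V_5_6_7_ID
        System.out.println(id(7, 0, 0, 1));  // 7000001, matches V_7_0_0_alpha1_ID
    }
}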
V_6_1_2 = new Version(V_6_1_2_ID, org.apache.lucene.util.Version.LUCENE_7_1_0); + public static final int V_6_1_3_ID = 6010399; + public static final Version V_6_1_3 = new Version(V_6_1_3_ID, org.apache.lucene.util.Version.LUCENE_7_1_0); public static final int V_6_2_0_ID = 6020099; public static final Version V_6_2_0 = new Version(V_6_2_0_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); + public static final int V_6_3_0_ID = 6030099; + public static final Version V_6_3_0 = new Version(V_6_3_0_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); public static final int V_7_0_0_alpha1_ID = 7000001; public static final Version V_7_0_0_alpha1 = new Version(V_7_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); @@ -162,8 +168,12 @@ public static Version fromId(int id) { switch (id) { case V_7_0_0_alpha1_ID: return V_7_0_0_alpha1; + case V_6_3_0_ID: + return V_6_3_0; case V_6_2_0_ID: return V_6_2_0; + case V_6_1_3_ID: + return V_6_1_3; case V_6_1_2_ID: return V_6_1_2; case V_6_1_1_ID: @@ -188,6 +198,8 @@ public static Version fromId(int id) { return V_6_0_0_alpha2; case V_6_0_0_alpha1_ID: return V_6_0_0_alpha1; + case V_5_6_7_ID: + return V_5_6_7; case V_5_6_6_ID: return V_5_6_6; case V_5_6_5_ID: diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java index b7ce0407681cf..6edc95f649d40 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java @@ -62,8 +62,7 @@ protected GetAliasesResponse newResponse() { @Override protected void masterOperation(GetAliasesRequest request, ClusterState state, ActionListener listener) { String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request); - @SuppressWarnings("unchecked") - ImmutableOpenMap> result = (ImmutableOpenMap) state.metaData().findAliases(request.aliases(), concreteIndices); + ImmutableOpenMap> result = state.metaData().findAliases(request.aliases(), concreteIndices); listener.onResponse(new GetAliasesResponse(result)); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheResponse.java index cd3355cae8766..d0f4b3cc20beb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheResponse.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.admin.indices.cache.clear; -import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -38,7 +38,8 @@ public class ClearIndicesCacheResponse extends BroadcastResponse { } - ClearIndicesCacheResponse(int totalShards, int successfulShards, int failedShards, List shardFailures) { + ClearIndicesCacheResponse(int totalShards, int successfulShards, int failedShards, + List shardFailures) { super(totalShards, successfulShards, failedShards, shardFailures); } diff --git 
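// Editor's note (illustration, not part of the patch): the recurring change in
// the hunks below swaps the ShardOperationFailedException interface for the
// concrete DefaultShardOperationFailedException in broadcast-style responses,
// giving the failure list an element type that can serialize itself and be
// parsed back. A hedged, self-contained sketch of that idea; Failure and
// TypedFailuresSketch are hypothetical stand-ins, and the write loop mirrors
// the writeVInt-then-writeTo loop in the IndicesShardStoresResponse hunk
// further down in this patch.
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.List;

final class TypedFailuresSketch {
    static final class Failure {
        final String index;
        final int shardId;
        final String reason;

        Failure(String index, int shardId, String reason) {
            this.index = index;
            this.shardId = shardId;
            this.reason = reason;
        }

        // With a concrete element type, every failure knows its wire format.
        void writeTo(DataOutputStream out) throws IOException {
            out.writeUTF(index);
            out.writeInt(shardId);
            out.writeUTF(reason);
        }
    }

    static void writeFailures(DataOutputStream out, List<Failure> failures) throws IOException {
        out.writeInt(failures.size());
        for (Failure failure : failures) {
            failure.writeTo(out);
        }
    }
}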
a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java index 0ad94db7b1f30..eda82fb710ca0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java @@ -19,8 +19,8 @@ package org.elasticsearch.action.admin.indices.cache.clear; -import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -65,7 +65,7 @@ protected EmptyResult readShardResult(StreamInput in) throws IOException { @Override protected ClearIndicesCacheResponse newResponse(ClearIndicesCacheRequest request, int totalShards, int successfulShards, int failedShards, List responses, - List shardFailures, ClusterState clusterState) { + List shardFailures, ClusterState clusterState) { return new ClearIndicesCacheResponse(totalShards, successfulShards, failedShards, shardFailures); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java index b85962c0f55ed..4607586d9fa91 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java @@ -22,13 +22,23 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; /** * A response for a close index action. 
*/ -public class CloseIndexResponse extends AcknowledgedResponse { +public class CloseIndexResponse extends AcknowledgedResponse implements ToXContentObject { + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("close_index", true, + args -> new CloseIndexResponse((boolean) args[0])); + + static { + declareAcknowledgedField(PARSER); + } CloseIndexResponse() { } @@ -48,4 +58,16 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); writeAcknowledged(out); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + addAcknowledgedField(builder); + builder.endObject(); + return builder; + } + + public static CloseIndexResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.apply(parser, null); + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java index 244b8a24b9b67..362f54b74ab36 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java @@ -22,7 +22,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DestructiveOperations; import org.elasticsearch.action.support.master.TransportMasterNodeAction; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java index 1734c340bd4ef..4e2e257887512 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java @@ -43,7 +43,6 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ private final String cause; private final String index; private final String providedName; - private final boolean updateAllTypes; private Index recoverFrom; private ResizeType resizeType; @@ -61,12 +60,10 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT; - public CreateIndexClusterStateUpdateRequest(TransportMessage originalMessage, String cause, String index, String providedName, - boolean updateAllTypes) { + public CreateIndexClusterStateUpdateRequest(TransportMessage originalMessage, String cause, String index, String providedName) { this.originalMessage = originalMessage; this.cause = cause; this.index = index; - this.updateAllTypes = updateAllTypes; this.providedName = providedName; } @@ -155,11 +152,6 @@ public Index recoverFrom() { return recoverFrom; } - /** True if all fields that span multiple types should be updated, false otherwise */ - public boolean updateAllTypes() { - return updateAllTypes; - } - /** * The name that was provided by the user. This might contain a date math expression. 
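// Editor's note (illustration, not part of the patch): CloseIndexResponse in
// the hunk above now parses {"acknowledged": <boolean>} through a
// ConstructingObjectParser whose single constructor argument is registered by
// the shared declareAcknowledgedField(...) helper. A stand-alone sketch of the
// same pattern with the field declared explicitly; AckSketch is a hypothetical
// class, while ConstructingObjectParser and ParseField are the real ES types.
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;

final class AckSketch {
    private static final ConstructingObjectParser<AckSketch, Void> PARSER =
        new ConstructingObjectParser<>("ack_sketch", true, args -> new AckSketch((boolean) args[0]));

    static {
        // Presumably equivalent to what declareAcknowledgedField(PARSER) registers.
        PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), new ParseField("acknowledged"));
    }

    private final boolean acknowledged;

    AckSketch(boolean acknowledged) {
        this.acknowledged = acknowledged;
    }
}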
* @see IndexMetaData#SETTING_INDEX_PROVIDED_NAME diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java index 17941b582ec31..12f9f75619412 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java @@ -85,8 +85,6 @@ public class CreateIndexRequest extends AcknowledgedRequest private final Map customs = new HashMap<>(); - private boolean updateAllTypes = false; - private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT; public CreateIndexRequest() { @@ -429,17 +427,6 @@ public Map customs() { return this.customs; } - /** True if all fields that span multiple types should be updated, false otherwise */ - public boolean updateAllTypes() { - return updateAllTypes; - } - - /** See {@link #updateAllTypes()} */ - public CreateIndexRequest updateAllTypes(boolean updateAllTypes) { - this.updateAllTypes = updateAllTypes; - return this; - } - public ActiveShardCount waitForActiveShards() { return waitForActiveShards; } @@ -499,7 +486,9 @@ public void readFrom(StreamInput in) throws IOException { for (int i = 0; i < aliasesSize; i++) { aliases.add(Alias.read(in)); } - updateAllTypes = in.readBoolean(); + if (in.getVersion().before(Version.V_7_0_0_alpha1)) { + in.readBoolean(); // updateAllTypes + } waitForActiveShards = ActiveShardCount.readFrom(in); } @@ -523,7 +512,9 @@ public void writeTo(StreamOutput out) throws IOException { for (Alias alias : aliases) { alias.writeTo(out); } - out.writeBoolean(updateAllTypes); + if (out.getVersion().before(Version.V_7_0_0_alpha1)) { + out.writeBoolean(true); // updateAllTypes + } waitForActiveShards.writeTo(out); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java index fabe269124e9e..b42b4e9236f0e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java @@ -239,12 +239,6 @@ public CreateIndexRequestBuilder setSource(XContentBuilder source) { return this; } - /** True if all fields that span multiple types should be updated, false otherwise */ - public CreateIndexRequestBuilder setUpdateAllTypes(boolean updateAllTypes) { - request.updateAllTypes(updateAllTypes); - return this; - } - /** * Sets the number of shard copies that should be active for index creation to return. 
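// Editor's note (illustration, not part of the patch): the readFrom/writeTo
// hunks above show the wire-compatibility idiom for a field removed in
// 7.0.0-alpha1: keep reading and discarding (or writing a fixed default) when
// the other node is older, and skip the field entirely otherwise. The same
// idiom sketched in isolation; RemovedFlagSketch is hypothetical, while
// Version, StreamInput and StreamOutput are the real ES classes.
import java.io.IOException;
import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

final class RemovedFlagSketch {
    void readFrom(StreamInput in) throws IOException {
        if (in.getVersion().before(Version.V_7_0_0_alpha1)) {
            in.readBoolean(); // discard updateAllTypes sent by an older node
        }
    }

    void writeTo(StreamOutput out) throws IOException {
        if (out.getVersion().before(Version.V_7_0_0_alpha1)) {
            out.writeBoolean(true); // older nodes still expect the flag
        }
    }
}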
 * Defaults to {@link ActiveShardCount#DEFAULT}, which will wait for one shard copy
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java
index 372c2eb861237..4cf159c439cb5 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java
@@ -72,7 +72,7 @@ protected void masterOperation(final CreateIndexRequest request, final ClusterSt
         }

         final String indexName = indexNameExpressionResolver.resolveDateMathExpression(request.index());
-        final CreateIndexClusterStateUpdateRequest updateRequest = new CreateIndexClusterStateUpdateRequest(request, cause, indexName, request.index(), request.updateAllTypes())
+        final CreateIndexClusterStateUpdateRequest updateRequest = new CreateIndexClusterStateUpdateRequest(request, cause, indexName, request.index())
             .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
             .settings(request.settings()).mappings(request.mappings())
             .aliases(request.aliases()).customs(request.customs())
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushResponse.java
index c2ac70026454c..273fc3e817d46 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushResponse.java
@@ -19,7 +19,7 @@
 package org.elasticsearch.action.admin.indices.flush;

-import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.BroadcastResponse;

 import java.util.List;
@@ -35,7 +35,7 @@ public class FlushResponse extends BroadcastResponse {

     }

-    FlushResponse(int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+    FlushResponse(int totalShards, int successfulShards, int failedShards, List<DefaultShardOperationFailedException> shardFailures) {
         super(totalShards, successfulShards, failedShards, shardFailures);
     }

diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java
index a29918b438ef3..91755388320a3 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java
@@ -19,8 +19,8 @@
 package org.elasticsearch.action.admin.indices.flush;

-import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.replication.ReplicationResponse;
 import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
@@ -57,7 +57,8 @@ protected ShardFlushRequest newShardRequest(FlushRequest request, ShardId shardI
     }

     @Override
-    protected FlushResponse newResponse(int successfulShards, int failedShards, int totalNumCopies, List<ShardOperationFailedException> shardFailures) {
+    protected FlushResponse newResponse(int successfulShards, int failedShards, int totalNumCopies, List<DefaultShardOperationFailedException>
+        shardFailures) {
         return new FlushResponse(totalNumCopies, successfulShards, failedShards, shardFailures);
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponse.java
index 3844f00193c5f..f77bb5d6a57de 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponse.java
@@ -19,7 +19,7 @@
 package org.elasticsearch.action.admin.indices.forcemerge;

-import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.BroadcastResponse;

 import java.util.List;
@@ -32,7 +32,7 @@ public class ForceMergeResponse extends BroadcastResponse {
     ForceMergeResponse() {
     }

-    ForceMergeResponse(int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+    ForceMergeResponse(int totalShards, int successfulShards, int failedShards, List<DefaultShardOperationFailedException> shardFailures) {
         super(totalShards, successfulShards, failedShards, shardFailures);
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java
index 18ac88e1b3056..94f27a93624d5 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java
@@ -19,8 +19,8 @@
 package org.elasticsearch.action.admin.indices.forcemerge;

-import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
@@ -62,7 +62,7 @@ protected EmptyResult readShardResult(StreamInput in) throws IOException {
     }

     @Override
-    protected ForceMergeResponse newResponse(ForceMergeRequest request, int totalShards, int successfulShards, int failedShards, List<EmptyResult> responses, List<ShardOperationFailedException> shardFailures, ClusterState clusterState) {
+    protected ForceMergeResponse newResponse(ForceMergeRequest request, int totalShards, int successfulShards, int failedShards, List<EmptyResult> responses, List<DefaultShardOperationFailedException> shardFailures, ClusterState clusterState) {
         return new ForceMergeResponse(totalShards, successfulShards, failedShards, shardFailures);
     }

diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingClusterStateUpdateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingClusterStateUpdateRequest.java
index 0f396afa5513b..0deb63ba285ff 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingClusterStateUpdateRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingClusterStateUpdateRequest.java
@@ -30,8 +30,6 @@ public class PutMappingClusterStateUpdateRequest extends IndicesClusterStateUpda

     private String source;

-    private boolean updateAllTypes = false;
-
     public PutMappingClusterStateUpdateRequest() {

     }
@@ -53,13 +51,4
@@ public PutMappingClusterStateUpdateRequest source(String source) { this.source = source; return this; } - - public boolean updateAllTypes() { - return updateAllTypes; - } - - public PutMappingClusterStateUpdateRequest updateAllTypes(boolean updateAllTypes) { - this.updateAllTypes = updateAllTypes; - return this; - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java index eecbbc453ee4d..03c1308e1d9fd 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; @@ -57,7 +58,7 @@ * @see org.elasticsearch.client.IndicesAdminClient#putMapping(PutMappingRequest) * @see PutMappingResponse */ -public class PutMappingRequest extends AcknowledgedRequest implements IndicesRequest.Replaceable { +public class PutMappingRequest extends AcknowledgedRequest implements IndicesRequest.Replaceable, ToXContentObject { private static ObjectHashSet RESERVED_FIELDS = ObjectHashSet.from( "_uid", "_id", "_type", "_source", "_all", "_analyzer", "_parent", "_routing", "_index", @@ -72,7 +73,6 @@ public class PutMappingRequest extends AcknowledgedRequest im private String source; - private boolean updateAllTypes = false; private Index concreteIndex; public PutMappingRequest() { @@ -290,17 +290,6 @@ public PutMappingRequest source(BytesReference mappingSource, XContentType xCont } } - /** True if all fields that span multiple types should be updated, false otherwise */ - public boolean updateAllTypes() { - return updateAllTypes; - } - - /** See {@link #updateAllTypes()} */ - public PutMappingRequest updateAllTypes(boolean updateAllTypes) { - this.updateAllTypes = updateAllTypes; - return this; - } - @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); @@ -312,7 +301,9 @@ public void readFrom(StreamInput in) throws IOException { // we do not know the format from earlier versions so convert if necessary source = XContentHelper.convertToJson(new BytesArray(source), false, false, XContentFactory.xContentType(source)); } - updateAllTypes = in.readBoolean(); + if (in.getVersion().before(Version.V_7_0_0_alpha1)) { + in.readBoolean(); // updateAllTypes + } concreteIndex = in.readOptionalWriteable(Index::new); } @@ -323,7 +314,19 @@ public void writeTo(StreamOutput out) throws IOException { indicesOptions.writeIndicesOptions(out); out.writeOptionalString(type); out.writeString(source); - out.writeBoolean(updateAllTypes); + if (out.getVersion().before(Version.V_7_0_0_alpha1)) { + out.writeBoolean(true); // updateAllTypes + } out.writeOptionalWriteable(concreteIndex); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (source != null) { + builder.rawValue(new BytesArray(source), XContentType.JSON); + } else { + builder.startObject().endObject(); + } + return builder; + } } diff --git 
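// Editor's note (illustration, not part of the patch): PutMappingRequest keeps
// its mapping "source" as a pre-rendered JSON string, so the new toXContent in
// the hunk above embeds it with rawValue(...) rather than rebuilding it field
// by field. A hedged sketch of the same technique, assuming the
// XContentBuilder#rawValue(BytesReference, XContentType) overload used above;
// RawSourceSketch and render(...) are hypothetical names.
import java.io.IOException;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;

final class RawSourceSketch {
    static XContentBuilder render(String mappingJson) throws IOException {
        XContentBuilder builder = XContentFactory.jsonBuilder();
        if (mappingJson != null) {
            builder.rawValue(new BytesArray(mappingJson), XContentType.JSON); // pass JSON through untouched
        } else {
            builder.startObject().endObject(); // no source: emit an empty object
        }
        return builder;
    }
}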
a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java index 43bfe78c4871b..7baba39d96a29 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java @@ -98,10 +98,4 @@ public PutMappingRequestBuilder setSource(Object... source) { return this; } - /** True if all fields that span multiple types should be updated, false otherwise */ - public PutMappingRequestBuilder setUpdateAllTypes(boolean updateAllTypes) { - request.updateAllTypes(updateAllTypes); - return this; - } - } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingResponse.java index 64b3c77f05067..f427a316c2e81 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingResponse.java @@ -22,13 +22,24 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; /** * The response of put mapping operation. */ -public class PutMappingResponse extends AcknowledgedResponse { +public class PutMappingResponse extends AcknowledgedResponse implements ToXContentObject { + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("put_mapping", + true, args -> new PutMappingResponse((boolean) args[0])); + + static { + declareAcknowledgedField(PARSER); + } protected PutMappingResponse() { @@ -49,4 +60,16 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); writeAcknowledged(out); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + addAcknowledgedField(builder); + builder.endObject(); + return builder; + } + + public static PutMappingResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.apply(parser, null); + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java index d9ebf88fda6d7..e10a20096fa30 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java @@ -82,7 +82,6 @@ protected void masterOperation(final PutMappingRequest request, final ClusterSta PutMappingClusterStateUpdateRequest updateRequest = new PutMappingClusterStateUpdateRequest() .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout()) .indices(concreteIndices).type(request.type()) - .updateAllTypes(request.updateAllTypes()) .source(request.source()); 
        metaDataMappingService.putMapping(updateRequest, new ActionListener<ClusterStateUpdateResponse>() {
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponse.java
index 95fef9fc65344..4e98c60265c76 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponse.java
@@ -37,7 +37,7 @@
 /**
  * A response for an open index action.
  */
-public class OpenIndexResponse extends AcknowledgedResponse implements ToXContentObject {
+public class OpenIndexResponse extends AcknowledgedResponse implements ToXContentObject {

     private static final String SHARDS_ACKNOWLEDGED = "shards_acknowledged";
     private static final ParseField SHARDS_ACKNOWLEDGED_PARSER = new ParseField(SHARDS_ACKNOWLEDGED);
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryResponse.java
index a19393ebd5beb..1a9c86049f8c6 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryResponse.java
@@ -19,7 +19,7 @@
 package org.elasticsearch.action.admin.indices.recovery;

-import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -56,7 +56,8 @@ public RecoveryResponse() { }
      * @param shardFailures    List of failures processing shards
      */
     public RecoveryResponse(int totalShards, int successfulShards, int failedShards, boolean detailed,
-                            Map<String, List<RecoveryState>> shardRecoveryStates, List<ShardOperationFailedException> shardFailures) {
+                            Map<String, List<RecoveryState>> shardRecoveryStates,
+                            List<DefaultShardOperationFailedException> shardFailures) {
         super(totalShards, successfulShards, failedShards, shardFailures);
         this.shardRecoveryStates = shardRecoveryStates;
         this.detailed = detailed;
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java
index 01f37527374fc..0e11aed9d24fd 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java
@@ -19,8 +19,8 @@
 package org.elasticsearch.action.admin.indices.recovery;

-import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
@@ -69,7 +69,7 @@ protected RecoveryState readShardResult(StreamInput in) throws IOException {

     @Override
-    protected RecoveryResponse newResponse(RecoveryRequest request, int totalShards, int successfulShards, int failedShards, List<RecoveryState> responses, List<ShardOperationFailedException> shardFailures, ClusterState clusterState) {
+    protected RecoveryResponse newResponse(RecoveryRequest request, int totalShards, int successfulShards, int failedShards, List<RecoveryState> responses, List<DefaultShardOperationFailedException>
shardFailures, ClusterState clusterState) {
         Map<String, List<RecoveryState>> shardResponses = new HashMap<>();
         for (RecoveryState recoveryState : responses) {
             if (recoveryState == null) {
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java
index 114d1800982d8..20165d078c5c9 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java
@@ -19,38 +19,34 @@
 package org.elasticsearch.action.admin.indices.refresh;

-import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.BroadcastResponse;
-import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.xcontent.ConstructingObjectParser;
-import org.elasticsearch.common.xcontent.ToXContentFragment;
 import org.elasticsearch.common.xcontent.XContentParser;

+import java.util.Arrays;
 import java.util.List;

-import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
-
 /**
  * The response of a refresh action.
  */
-public class RefreshResponse extends BroadcastResponse implements ToXContentFragment {
+public class RefreshResponse extends BroadcastResponse {

-    private static final ConstructingObjectParser<RefreshResponse, Void> PARSER = new ConstructingObjectParser<>("refresh",
-        true, arg -> (RefreshResponse) arg[0]);
+    private static final ConstructingObjectParser<RefreshResponse, Void> PARSER = new ConstructingObjectParser<>("refresh", true,
+        arg -> {
+            BroadcastResponse response = (BroadcastResponse) arg[0];
+            return new RefreshResponse(response.getTotalShards(), response.getSuccessfulShards(), response.getFailedShards(),
+                Arrays.asList(response.getShardFailures()));
+        });

     static {
-        ConstructingObjectParser<RefreshResponse, Void> shardsParser = new ConstructingObjectParser<>("_shards", true,
-            arg -> new RefreshResponse((int) arg[0], (int) arg[1], (int) arg[2], null));
-        shardsParser.declareInt(constructorArg(), new ParseField(Fields.TOTAL));
-        shardsParser.declareInt(constructorArg(), new ParseField(Fields.SUCCESSFUL));
-        shardsParser.declareInt(constructorArg(), new ParseField(Fields.FAILED));
-        PARSER.declareObject(constructorArg(), shardsParser, new ParseField(Fields._SHARDS));
+        declareBroadcastFields(PARSER);
     }

     RefreshResponse() {
     }

-    RefreshResponse(int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+    RefreshResponse(int totalShards, int successfulShards, int failedShards, List<DefaultShardOperationFailedException> shardFailures) {
         super(totalShards, successfulShards, failedShards, shardFailures);
     }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java
index 9752e68517e15..d44783d3c64f1 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java
@@ -19,9 +19,9 @@
 package org.elasticsearch.action.admin.indices.refresh;

-import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.ActiveShardCount;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import
org.elasticsearch.action.support.replication.BasicReplicationRequest; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction; @@ -61,7 +61,8 @@ protected BasicReplicationRequest newShardRequest(RefreshRequest request, ShardI } @Override - protected RefreshResponse newResponse(int successfulShards, int failedShards, int totalNumCopies, List shardFailures) { + protected RefreshResponse newResponse(int successfulShards, int failedShards, int totalNumCopies, + List shardFailures) { return new RefreshResponse(totalNumCopies, successfulShards, failedShards, shardFailures); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java index 2ed5192e6cfb2..ded01077da2af 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -232,7 +232,7 @@ static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest(final Stri createIndexRequest.cause("rollover_index"); createIndexRequest.index(targetIndexName); return new CreateIndexClusterStateUpdateRequest(createIndexRequest, - "rollover_index", targetIndexName, providedIndexName, true) + "rollover_index", targetIndexName, providedIndexName) .ackTimeout(createIndexRequest.timeout()) .masterNodeTimeout(createIndexRequest.masterNodeTimeout()) .settings(createIndexRequest.settings()) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java index 2e241ef1614b9..b9296c0242fdb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java @@ -24,7 +24,7 @@ import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.SortedSetSortField; import org.apache.lucene.util.Accountable; -import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -53,7 +53,8 @@ public class IndicesSegmentResponse extends BroadcastResponse implements ToXCont } - IndicesSegmentResponse(ShardSegments[] shards, int totalShards, int successfulShards, int failedShards, List shardFailures) { + IndicesSegmentResponse(ShardSegments[] shards, int totalShards, int successfulShards, int failedShards, + List shardFailures) { super(totalShards, successfulShards, failedShards, shardFailures); this.shards = shards; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java index 350e8dffa1999..94b12c9ab17d5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java @@ 
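// Editor's note (illustration, not part of the patch): with
// declareBroadcastFields(PARSER), RefreshResponse above is parsed from the
// standard broadcast "_shards" section, e.g.
//   { "_shards": { "total": 10, "successful": 10, "failed": 0 } }
// A hedged round-trip sketch, assuming the 6.x-era
// XContent#createParser(NamedXContentRegistry, String) signature;
// RefreshParseSketch is hypothetical, while RefreshResponse.fromXContent is
// the method this series adds.
import java.io.IOException;
import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;

final class RefreshParseSketch {
    static RefreshResponse parse(String json) throws IOException {
        try (XContentParser parser = XContentType.JSON.xContent()
                .createParser(NamedXContentRegistry.EMPTY, json)) {
            return RefreshResponse.fromXContent(parser);
        }
    }
}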
-19,8 +19,8 @@ package org.elasticsearch.action.admin.indices.segments; -import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -77,7 +77,7 @@ protected ShardSegments readShardResult(StreamInput in) throws IOException { } @Override - protected IndicesSegmentResponse newResponse(IndicesSegmentsRequest request, int totalShards, int successfulShards, int failedShards, List results, List shardFailures, ClusterState clusterState) { + protected IndicesSegmentResponse newResponse(IndicesSegmentsRequest request, int totalShards, int successfulShards, int failedShards, List results, List shardFailures, ClusterState clusterState) { return new IndicesSegmentResponse(results.toArray(new ShardSegments[results.size()]), totalShards, successfulShards, failedShards, shardFailures); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java index 70624380e8611..6cf160897482c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java @@ -25,7 +25,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.collect.ImmutableOpenIntMap; @@ -348,7 +347,7 @@ public void writeTo(StreamOutput out) throws IOException { } } out.writeVInt(failures.size()); - for (ShardOperationFailedException failure : failures) { + for (Failure failure : failures) { failure.writeTo(out); } } @@ -357,7 +356,7 @@ public void writeTo(StreamOutput out) throws IOException { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { if (failures.size() > 0) { builder.startArray(Fields.FAILURES); - for (ShardOperationFailedException failure : failures) { + for (Failure failure : failures) { builder.startObject(); failure.toXContent(builder, params); builder.endObject(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java index 688d33a0be734..28fc994a4677e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java @@ -179,7 +179,7 @@ static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest(final Resi targetIndex.settings(settingsBuilder); return new CreateIndexClusterStateUpdateRequest(targetIndex, - cause, targetIndex.index(), targetIndexName, true) + cause, targetIndex.index(), targetIndexName) // mappings are updated on the node when creating in the shards, this prevents race-conditions since all mapping must be // applied once we took the snapshot and if 
somebody messes things up and switches the index read/write and adds docs we miss // the mappings for everything is corrupted and hard to debug diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java index 5fcd4e5e62e9f..24a0e10e86695 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.admin.indices.stats; -import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.io.stream.StreamInput; @@ -48,7 +48,8 @@ public class IndicesStatsResponse extends BroadcastResponse implements ToXConten } - IndicesStatsResponse(ShardStats[] shards, int totalShards, int successfulShards, int failedShards, List shardFailures) { + IndicesStatsResponse(ShardStats[] shards, int totalShards, int successfulShards, int failedShards, + List shardFailures) { super(totalShards, successfulShards, failedShards, shardFailures); this.shards = shards; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java index bed820189d1a8..50d7712da11d0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java @@ -19,8 +19,8 @@ package org.elasticsearch.action.admin.indices.stats; -import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -79,7 +79,7 @@ protected ShardStats readShardResult(StreamInput in) throws IOException { } @Override - protected IndicesStatsResponse newResponse(IndicesStatsRequest request, int totalShards, int successfulShards, int failedShards, List responses, List shardFailures, ClusterState clusterState) { + protected IndicesStatsResponse newResponse(IndicesStatsRequest request, int totalShards, int successfulShards, int failedShards, List responses, List shardFailures, ClusterState clusterState) { return new IndicesStatsResponse(responses.toArray(new ShardStats[responses.size()]), totalShards, successfulShards, failedShards, shardFailures); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java index c2c4424d4c897..19566acaf7af4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java @@ -20,8 +20,8 @@ package 
org.elasticsearch.action.admin.indices.upgrade.get; import org.elasticsearch.Version; -import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -79,7 +79,7 @@ protected ShardUpgradeStatus readShardResult(StreamInput in) throws IOException } @Override - protected UpgradeStatusResponse newResponse(UpgradeStatusRequest request, int totalShards, int successfulShards, int failedShards, List responses, List shardFailures, ClusterState clusterState) { + protected UpgradeStatusResponse newResponse(UpgradeStatusRequest request, int totalShards, int successfulShards, int failedShards, List responses, List shardFailures, ClusterState clusterState) { return new UpgradeStatusResponse(responses.toArray(new ShardUpgradeStatus[responses.size()]), totalShards, successfulShards, failedShards, shardFailures); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java index 565348f5ac22b..71110f18b875c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java @@ -19,11 +19,10 @@ package org.elasticsearch.action.admin.indices.upgrade.get; -import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ToXContent.Params; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -43,7 +42,8 @@ public class UpgradeStatusResponse extends BroadcastResponse implements ToXConte UpgradeStatusResponse() { } - UpgradeStatusResponse(ShardUpgradeStatus[] shards, int totalShards, int successfulShards, int failedShards, List shardFailures) { + UpgradeStatusResponse(ShardUpgradeStatus[] shards, int totalShards, int successfulShards, int failedShards, + List shardFailures) { super(totalShards, successfulShards, failedShards, shardFailures); this.shards = shards; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java index 87f39336047b2..67e51c8e5575c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java @@ -22,8 +22,8 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.PrimaryMissingActionException; -import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; import 
org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -71,7 +71,7 @@ public TransportUpgradeAction(Settings settings, ThreadPool threadPool, ClusterS } @Override - protected UpgradeResponse newResponse(UpgradeRequest request, int totalShards, int successfulShards, int failedShards, List shardUpgradeResults, List shardFailures, ClusterState clusterState) { + protected UpgradeResponse newResponse(UpgradeRequest request, int totalShards, int successfulShards, int failedShards, List shardUpgradeResults, List shardFailures, ClusterState clusterState) { Map successfulPrimaryShards = new HashMap<>(); Map> versions = new HashMap<>(); for (ShardUpgradeResult result : shardUpgradeResults) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeResponse.java index 64e958372cdc8..db49921d43532 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeResponse.java @@ -20,7 +20,7 @@ package org.elasticsearch.action.admin.indices.upgrade.post; import org.elasticsearch.Version; -import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamInput; @@ -44,7 +44,8 @@ public class UpgradeResponse extends BroadcastResponse { } - UpgradeResponse(Map> versions, int totalShards, int successfulShards, int failedShards, List shardFailures) { + UpgradeResponse(Map> versions, int totalShards, int successfulShards, int failedShards, + List shardFailures) { super(totalShards, successfulShards, failedShards, shardFailures); this.versions = versions; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java index c4369a30586d0..0513a37e4fe0e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java @@ -22,7 +22,6 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException; @@ -115,7 +114,7 @@ protected ValidateQueryResponse newResponse(ValidateQueryRequest request, Atomic int successfulShards = 0; int failedShards = 0; boolean valid = true; - List shardFailures = null; + List shardFailures = null; List queryExplanations = null; for (int i = 0; i < shardsResponses.length(); i++) { Object shardResponse = shardsResponses.get(i); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java 
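// Editor's note (illustration, not part of the patch): UpgradeResponse in the
// hunk above carries a Map<String, Tuple<Version, org.apache.lucene.util.Version>>,
// i.e. for every index the Elasticsearch version and the Lucene version it was
// upgraded to. A small sketch of building that shape with the real Tuple
// helper; UpgradeVersionsSketch and fold(...) are hypothetical names.
import java.util.HashMap;
import java.util.Map;
import org.elasticsearch.common.collect.Tuple;

final class UpgradeVersionsSketch {
    static Map<String, Tuple<org.elasticsearch.Version, org.apache.lucene.util.Version>> fold(
            Map<String, org.elasticsearch.Version> esVersions,
            Map<String, org.apache.lucene.util.Version> luceneVersions) {
        Map<String, Tuple<org.elasticsearch.Version, org.apache.lucene.util.Version>> out = new HashMap<>();
        // Pair each index's ES version with the Lucene version it carries.
        esVersions.forEach((index, v) -> out.put(index, new Tuple<>(v, luceneVersions.get(index))));
        return out;
    }
}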
b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java index 2d3c0a0a90eff..eff37ff4b0cb4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.admin.indices.validate.query; -import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -46,7 +46,8 @@ public class ValidateQueryResponse extends BroadcastResponse { } - ValidateQueryResponse(boolean valid, List queryExplanations, int totalShards, int successfulShards, int failedShards, List shardFailures) { + ValidateQueryResponse(boolean valid, List queryExplanations, int totalShards, int successfulShards, int failedShards, + List shardFailures) { super(totalShards, successfulShards, failedShards, shardFailures); this.valid = valid; this.queryExplanations = queryExplanations; diff --git a/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java b/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java index 48e3f5e81bf6f..a7b63da8974fd 100644 --- a/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java +++ b/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java @@ -35,7 +35,10 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.index.VersionType; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; @@ -47,8 +50,10 @@ import java.util.List; import java.util.Locale; -public class MultiGetRequest extends ActionRequest implements Iterable, CompositeIndicesRequest, RealtimeRequest { +public class MultiGetRequest extends ActionRequest + implements Iterable, CompositeIndicesRequest, RealtimeRequest, ToXContentObject { + private static final ParseField DOCS = new ParseField("docs"); private static final ParseField INDEX = new ParseField("_index"); private static final ParseField TYPE = new ParseField("_type"); private static final ParseField ID = new ParseField("_id"); @@ -63,7 +68,8 @@ public class MultiGetRequest extends ActionRequest implements Iterable items, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, @Nullable String defaultRouting, boolean allowExplicitIndex) throws IOException { + private static void parseDocuments(XContentParser parser, List items, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, @Nullable String defaultRouting, boolean allowExplicitIndex) throws IOException { String currentFieldName = null; - XContentParser.Token token; - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - if (token != XContentParser.Token.START_OBJECT) { + Token token; + 
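// Editor's note (illustration, not part of the patch): parseDocuments
// (continuing below) consumes a body of the form
//   { "docs": [ { "_index": "idx", "_type": "doc", "_id": "1" }, ... ] }
// which is also what the new MultiGetRequest#toXContent at the end of this
// file emits. An illustrative builder for such a body; MultiGetBodySketch is
// hypothetical, XContentFactory/XContentBuilder are the real ES classes.
import java.io.IOException;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

final class MultiGetBodySketch {
    static XContentBuilder docsBody() throws IOException {
        XContentBuilder builder = XContentFactory.jsonBuilder();
        builder.startObject();
        builder.startArray("docs");
        builder.startObject()
            .field("_index", "idx") // hypothetical index name
            .field("_type", "doc")
            .field("_id", "1")
            .endObject();
        builder.endArray();
        builder.endObject();
        return builder;
    }
}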
while ((token = parser.nextToken()) != Token.END_ARRAY) { + if (token != Token.START_OBJECT) { throw new IllegalArgumentException("docs array element should include an object"); } String index = defaultIndex; @@ -387,8 +414,8 @@ public static void parseDocuments(XContentParser parser, List items, @Null FetchSourceContext fetchSourceContext = FetchSourceContext.FETCH_SOURCE; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { + while ((token = parser.nextToken()) != Token.END_OBJECT) { + if (token == Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token.isValue()) { if (INDEX.match(currentFieldName)) { @@ -419,7 +446,7 @@ public static void parseDocuments(XContentParser parser, List items, @Null if (parser.isBooleanValueLenient()) { fetchSourceContext = new FetchSourceContext(parser.booleanValue(), fetchSourceContext.includes(), fetchSourceContext.excludes()); - } else if (token == XContentParser.Token.VALUE_STRING) { + } else if (token == Token.VALUE_STRING) { fetchSourceContext = new FetchSourceContext(fetchSourceContext.fetchSource(), new String[]{parser.text()}, fetchSourceContext.excludes()); } else { @@ -428,30 +455,30 @@ public static void parseDocuments(XContentParser parser, List items, @Null } else { throw new ElasticsearchParseException("failed to parse multi get request. unknown field [{}]", currentFieldName); } - } else if (token == XContentParser.Token.START_ARRAY) { + } else if (token == Token.START_ARRAY) { if (FIELDS.match(currentFieldName)) { throw new ParsingException(parser.getTokenLocation(), "Unsupported field [fields] used, expected [stored_fields] instead"); } else if (STORED_FIELDS.match(currentFieldName)) { storedFields = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + while ((token = parser.nextToken()) != Token.END_ARRAY) { storedFields.add(parser.text()); } } else if (SOURCE.match(currentFieldName)) { ArrayList includes = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + while ((token = parser.nextToken()) != Token.END_ARRAY) { includes.add(parser.text()); } fetchSourceContext = new FetchSourceContext(fetchSourceContext.fetchSource(), includes.toArray(Strings.EMPTY_ARRAY) , fetchSourceContext.excludes()); } - } else if (token == XContentParser.Token.START_OBJECT) { + } else if (token == Token.START_OBJECT) { if (SOURCE.match(currentFieldName)) { List currentList = null, includes = null, excludes = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { + while ((token = parser.nextToken()) != Token.END_OBJECT) { + if (token == Token.FIELD_NAME) { currentFieldName = parser.currentName(); if ("includes".equals(currentFieldName) || "include".equals(currentFieldName)) { currentList = includes != null ? 
includes : (includes = new ArrayList<>(2)); @@ -460,8 +487,8 @@ public static void parseDocuments(XContentParser parser, List items, @Null } else { throw new ElasticsearchParseException("source definition may not contain [{}]", parser.text()); } - } else if (token == XContentParser.Token.START_ARRAY) { - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + } else if (token == Token.START_ARRAY) { + while ((token = parser.nextToken()) != Token.END_ARRAY) { currentList.add(parser.text()); } } else if (token.isValue()) { @@ -488,13 +515,9 @@ public static void parseDocuments(XContentParser parser, List items, @Null } } - public static void parseDocuments(XContentParser parser, List items) throws IOException { - parseDocuments(parser, items, null, null, null, null, null, true); - } - public static void parseIds(XContentParser parser, List items, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, @Nullable String defaultRouting) throws IOException { - XContentParser.Token token; - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + Token token; + while ((token = parser.nextToken()) != Token.END_ARRAY) { if (!token.isValue()) { throw new IllegalArgumentException("ids array element should only contain ids"); } @@ -537,4 +560,17 @@ public void writeTo(StreamOutput out) throws IOException { item.writeTo(out); } } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startArray(DOCS.getPreferredName()); + for (Item item : items) { + builder.value(item); + } + builder.endArray(); + builder.endObject(); + return builder; + } + } diff --git a/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java b/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java index 93e4272bd956c..9cd9f71a6c53a 100644 --- a/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java +++ b/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java @@ -21,29 +21,41 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.index.get.GetResult; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Iterator; +import java.util.List; public class MultiGetResponse extends ActionResponse implements Iterable, ToXContentObject { + private static final ParseField INDEX = new ParseField("_index"); + private static final ParseField TYPE = new ParseField("_type"); + private static final ParseField ID = new ParseField("_id"); + private static final ParseField ERROR = new ParseField("error"); + private static final ParseField DOCS = new ParseField("docs"); + /** * Represents a failure. 
*/ - public static class Failure implements Streamable { + public static class Failure implements Streamable, ToXContentObject { + private String index; private String type; private String id; private Exception exception; Failure() { - } public Failure(String index, String type, String id, Exception exception) { @@ -103,6 +115,17 @@ public void writeTo(StreamOutput out) throws IOException { out.writeException(exception); } + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(INDEX.getPreferredName(), index); + builder.field(TYPE.getPreferredName(), type); + builder.field(ID.getPreferredName(), id); + ElasticsearchException.generateFailureXContent(builder, params, exception, true); + builder.endObject(); + return builder; + } + public Exception getFailure() { return exception; } @@ -129,16 +152,11 @@ public Iterator iterator() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.startArray(Fields.DOCS); + builder.startArray(DOCS.getPreferredName()); for (MultiGetItemResponse response : responses) { if (response.isFailed()) { - builder.startObject(); Failure failure = response.getFailure(); - builder.field(Fields._INDEX, failure.getIndex()); - builder.field(Fields._TYPE, failure.getType()); - builder.field(Fields._ID, failure.getId()); - ElasticsearchException.generateFailureXContent(builder, params, failure.getFailure(), true); - builder.endObject(); + failure.toXContent(builder, params); } else { GetResponse getResponse = response.getResponse(); getResponse.toXContent(builder, params); @@ -149,11 +167,78 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - static final class Fields { - static final String DOCS = "docs"; - static final String _INDEX = "_index"; - static final String _TYPE = "_type"; - static final String _ID = "_id"; + public static MultiGetResponse fromXContent(XContentParser parser) throws IOException { + String currentFieldName = null; + List items = new ArrayList<>(); + for (Token token = parser.nextToken(); token != Token.END_OBJECT; token = parser.nextToken()) { + switch (token) { + case FIELD_NAME: + currentFieldName = parser.currentName(); + break; + case START_ARRAY: + if (DOCS.getPreferredName().equals(currentFieldName)) { + for (token = parser.nextToken(); token != Token.END_ARRAY; token = parser.nextToken()) { + if (token == Token.START_OBJECT) { + items.add(parseItem(parser)); + } + } + } + break; + default: + // If unknown tokens are encountered, they should be ignored, because + // this is parsing logic on the client side.
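// Editorial illustration, not part of the original patch: a response produced by a newer
// server that adds an extra top-level field would fall through to this default branch and
// be skipped, rather than failing the whole multi-get parse on the client.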
+ break; + } + } + return new MultiGetResponse(items.toArray(new MultiGetItemResponse[0])); + } + + private static MultiGetItemResponse parseItem(XContentParser parser) throws IOException { + String currentFieldName = null; + String index = null; + String type = null; + String id = null; + ElasticsearchException exception = null; + GetResult getResult = null; + for (Token token = parser.nextToken(); token != Token.END_OBJECT; token = parser.nextToken()) { + switch (token) { + case FIELD_NAME: + currentFieldName = parser.currentName(); + if (INDEX.match(currentFieldName) == false && TYPE.match(currentFieldName) == false && + ID.match(currentFieldName) == false && ERROR.match(currentFieldName) == false) { + getResult = GetResult.fromXContentEmbedded(parser, index, type, id); + } + break; + case VALUE_STRING: + if (INDEX.match(currentFieldName)) { + index = parser.text(); + } else if (TYPE.match(currentFieldName)) { + type = parser.text(); + } else if (ID.match(currentFieldName)) { + id = parser.text(); + } + break; + case START_OBJECT: + if (ERROR.match(currentFieldName)) { + exception = ElasticsearchException.fromXContent(parser); + } + break; + default: + // If unknown tokens are encountered, they should be ignored, because + // this is parsing logic on the client side. + break; + } + if (getResult != null) { + break; + } + } + + if (exception != null) { + return new MultiGetItemResponse(null, new Failure(index, type, id, exception)); + } else { + GetResponse getResponse = new GetResponse(getResult); + return new MultiGetItemResponse(getResponse, null); + } } @Override diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index 4632ef63174a2..b9e9f1ec483d2 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -122,14 +122,14 @@ public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPha * at least one successful operation left and if so we move to the next phase. If not we immediately fail the * search phase as "all shards failed"*/ if (successfulOps.get() == 0) { // we have 0 successful results that means we shortcut stuff and return a failure + final ShardOperationFailedException[] shardSearchFailures = ExceptionsHelper.groupBy(buildShardFailures()); + Throwable cause = shardSearchFailures.length == 0 ? null : + ElasticsearchException.guessRootCauses(shardSearchFailures[0].getCause())[0]; if (logger.isDebugEnabled()) { - final ShardOperationFailedException[] shardSearchFailures = ExceptionsHelper.groupBy(buildShardFailures()); - Throwable cause = shardSearchFailures.length == 0 ?
null : - ElasticsearchException.guessRootCauses(shardSearchFailures[0].getCause())[0]; logger.debug((Supplier) () -> new ParameterizedMessage("All shards failed for phase: [{}]", getName()), cause); } - onPhaseFailure(currentPhase, "all shards failed", null); + onPhaseFailure(currentPhase, "all shards failed", cause); } else { if (logger.isTraceEnabled()) { final String resultsFrom = results.getSuccessfulResults() diff --git a/server/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java b/server/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java index 2ced9145674a2..a2aace9133cc2 100644 --- a/server/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java +++ b/server/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java @@ -22,17 +22,31 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.rest.RestStatus; import java.io.IOException; import static org.elasticsearch.ExceptionsHelper.detailedMessage; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; public class DefaultShardOperationFailedException implements ShardOperationFailedException { + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "failures", true, arg -> new DefaultShardOperationFailedException((String) arg[0], (int) arg[1], (Throwable) arg[2])); + + static { + PARSER.declareString(constructorArg(), new ParseField(Fields.INDEX)); + PARSER.declareInt(constructorArg(), new ParseField(Fields.SHARD_ID)); + PARSER.declareObject(optionalConstructorArg(), (p, c) -> ElasticsearchException.fromXContent(p), new ParseField(Fields.REASON)); + } + private String index; private int shardId; @@ -119,8 +133,8 @@ public String toString() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field("shard", shardId()); builder.field("index", index()); + builder.field("shard", shardId()); builder.field("status", status.name()); if (reason != null) { builder.field("reason"); @@ -129,6 +143,15 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endObject(); } return builder; + } + + public static DefaultShardOperationFailedException fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + private static final class Fields { + static final String INDEX = "index"; + static final String SHARD_ID = "shardId"; + static final String REASON = "reason"; } } diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java index 746780c765722..8c5107b367dea 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java +++ 
b/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java @@ -20,47 +20,60 @@ package org.elasticsearch.action.support.broadcast; import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ToXContentFragment; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.shard.ShardNotFoundException; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.rest.RestStatus; import java.io.IOException; import java.util.List; import static org.elasticsearch.action.support.DefaultShardOperationFailedException.readShardOperationFailed; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; /** * Base class for all broadcast operation based responses. */ -public class BroadcastResponse extends ActionResponse implements ToXContentFragment{ - private static final ShardOperationFailedException[] EMPTY = new ShardOperationFailedException[0]; +public class BroadcastResponse extends ActionResponse { + + public static final DefaultShardOperationFailedException[] EMPTY = new DefaultShardOperationFailedException[0]; + + private static final ParseField _SHARDS_FIELD = new ParseField("_shards"); + private static final ParseField TOTAL_FIELD = new ParseField("total"); + private static final ParseField SUCCESSFUL_FIELD = new ParseField("successful"); + private static final ParseField FAILED_FIELD = new ParseField("failed"); + private static final ParseField FAILURES_FIELD = new ParseField("failures"); + private int totalShards; private int successfulShards; private int failedShards; - private ShardOperationFailedException[] shardFailures = EMPTY; + private DefaultShardOperationFailedException[] shardFailures = EMPTY; + + protected static void declareBroadcastFields(ConstructingObjectParser PARSER) { + ConstructingObjectParser shardsParser = new ConstructingObjectParser<>("_shards", true, + arg -> new BroadcastResponse((int) arg[0], (int) arg[1], (int) arg[2], (List) arg[3])); + shardsParser.declareInt(constructorArg(), TOTAL_FIELD); + shardsParser.declareInt(constructorArg(), SUCCESSFUL_FIELD); + shardsParser.declareInt(constructorArg(), FAILED_FIELD); + shardsParser.declareObjectArray(optionalConstructorArg(), (p, c) -> DefaultShardOperationFailedException.fromXContent(p), FAILURES_FIELD); + PARSER.declareObject(constructorArg(), shardsParser, _SHARDS_FIELD); + } public BroadcastResponse() { } public BroadcastResponse(int totalShards, int successfulShards, int failedShards, - List shardFailures) { - assertNoShardNotAvailableFailures(shardFailures); + List shardFailures) { this.totalShards = totalShards; this.successfulShards = successfulShards; this.failedShards = failedShards; - this.shardFailures = shardFailures == null ? 
EMPTY : - shardFailures.toArray(new ShardOperationFailedException[shardFailures.size()]); - } - - private void assertNoShardNotAvailableFailures(List shardFailures) { - if (shardFailures != null) { - for (Object e : shardFailures) { - assert (e instanceof ShardNotFoundException) == false : "expected no ShardNotFoundException failures, but got " + e; - } + if (shardFailures == null) { + this.shardFailures = EMPTY; + } else { + this.shardFailures = shardFailures.toArray(new DefaultShardOperationFailedException[shardFailures.size()]); } } @@ -99,7 +112,7 @@ public RestStatus getStatus() { /** * The list of shard failures exception. */ - public ShardOperationFailedException[] getShardFailures() { + public DefaultShardOperationFailedException[] getShardFailures() { return shardFailures; } @@ -111,7 +124,7 @@ public void readFrom(StreamInput in) throws IOException { failedShards = in.readVInt(); int size = in.readVInt(); if (size > 0) { - shardFailures = new ShardOperationFailedException[size]; + shardFailures = new DefaultShardOperationFailedException[size]; for (int i = 0; i < size; i++) { shardFailures[i] = readShardOperationFailed(in); } @@ -125,25 +138,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(successfulShards); out.writeVInt(failedShards); out.writeVInt(shardFailures.length); - for (ShardOperationFailedException exp : shardFailures) { + for (DefaultShardOperationFailedException exp : shardFailures) { exp.writeTo(out); } } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(Fields._SHARDS); - builder.field(Fields.TOTAL, getTotalShards()); - builder.field(Fields.SUCCESSFUL, getSuccessfulShards()); - builder.field(Fields.FAILED, getFailedShards()); - builder.endObject(); - return builder; - } - - public static final class Fields { - public static final String _SHARDS = "_shards"; - public static final String TOTAL = "total"; - public static final String SUCCESSFUL = "successful"; - public static final String FAILED = "failed"; - } } diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java index 3ef967472a597..b6eaa5163c865 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java @@ -24,7 +24,6 @@ import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.NoShardAvailableActionException; -import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.HandledTransportAction; @@ -131,7 +130,7 @@ private Response newResponse( int totalShards = 0; int successfulShards = 0; List broadcastByNodeResponses = new ArrayList<>(); - List exceptions = new ArrayList<>(); + List exceptions = new ArrayList<>(); for (int i = 0; i < responses.length(); i++) { if (responses.get(i) instanceof FailedNodeException) { FailedNodeException exception = (FailedNodeException) responses.get(i); @@ -176,7 +175,7 @@ private Response newResponse( * @param clusterState the cluster state * @return the response */ - protected abstract 
Response newResponse(Request request, int totalShards, int successfulShards, int failedShards, List results, List shardFailures, ClusterState clusterState); + protected abstract Response newResponse(Request request, int totalShards, int successfulShards, int failedShards, List results, List shardFailures, ClusterState clusterState); /** * Deserialize a request from an input stream diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java index 8193cf77cebef..4cad1c211700d 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java @@ -22,7 +22,6 @@ import com.carrotsearch.hppc.cursors.IntObjectCursor; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.HandledTransportAction; @@ -76,7 +75,7 @@ protected final void doExecute(final Request request, final ActionListener listener) { final ClusterState clusterState = clusterService.state(); List shards = shards(request, clusterState); - final CopyOnWriteArrayList shardsResponses = new CopyOnWriteArrayList(); + final CopyOnWriteArrayList shardsResponses = new CopyOnWriteArrayList<>(); if (shards.size() == 0) { finishAndNotifyListener(listener, shardsResponses); } @@ -148,7 +147,7 @@ private void finishAndNotifyListener(ActionListener listener, CopyOnWriteArrayLi int successfulShards = 0; int failedShards = 0; int totalNumCopies = 0; - List shardFailures = null; + List shardFailures = null; for (int i = 0; i < shardsResponses.size(); i++) { ReplicationResponse shardResponse = shardsResponses.get(i); if (shardResponse == null) { @@ -168,5 +167,6 @@ private void finishAndNotifyListener(ActionListener listener, CopyOnWriteArrayLi listener.onResponse(newResponse(successfulShards, failedShards, totalNumCopies, shardFailures)); } - protected abstract BroadcastResponse newResponse(int successfulShards, int failedShards, int totalNumCopies, List shardFailures); + protected abstract BroadcastResponse newResponse(int successfulShards, int failedShards, int totalNumCopies, + List shardFailures); } diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java index fa8c46edf5b7e..9bfb78f5058b4 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.update; +import java.util.Arrays; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.index.IndexRequest; @@ -893,4 +894,28 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endObject(); return builder; } + + @Override + public String toString() { + StringBuilder res = new StringBuilder() + .append("update {[").append(index) + .append("][").append(type) + .append("][").append(id).append("]"); + res.append(", 
doc_as_upsert[").append(docAsUpsert).append("]"); + if (doc != null) { + res.append(", doc[").append(doc).append("]"); + } + if (script != null) { + res.append(", script[").append(script).append("]"); + } + if (upsertRequest != null) { + res.append(", upsert[").append(upsertRequest).append("]"); + } + res.append(", scripted_upsert[").append(scriptedUpsert).append("]"); + res.append(", detect_noop[").append(detectNoop).append("]"); + if (fields != null) { + res.append(", fields[").append(Arrays.toString(fields)).append("]"); + } + return res.append("}").toString(); + } } diff --git a/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java b/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java index dc0f7b015632e..c9c575df724df 100644 --- a/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java +++ b/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.GenericAction; import org.elasticsearch.client.support.AbstractClient; import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -195,8 +196,16 @@ private static ClientTemplate buildTemplate(Settings providedSettings, Settings final TransportClientNodesService nodesService = new TransportClientNodesService(settings, transportService, threadPool, failureListner == null ? (t, e) -> {} : failureListner); - final TransportProxyClient proxy = new TransportProxyClient(settings, transportService, nodesService, - actionModule.getActions().values().stream().map(x -> x.getAction()).collect(Collectors.toList())); + + // construct the list of client actions + final List actionPlugins = pluginsService.filterPlugins(ActionPlugin.class); + final List clientActions = + actionPlugins.stream().flatMap(p -> p.getClientActions().stream()).collect(Collectors.toList()); + // add all the base actions + final List> baseActions = + actionModule.getActions().values().stream().map(ActionPlugin.ActionHandler::getAction).collect(Collectors.toList()); + clientActions.addAll(baseActions); + final TransportProxyClient proxy = new TransportProxyClient(settings, transportService, nodesService, clientActions); List pluginLifecycleComponents = new ArrayList<>(pluginsService.getGuiceServiceClasses().stream() .map(injector::getInstance).collect(Collectors.toList())); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index 0e9bcf8f11a8b..23ed28569d28d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -275,12 +275,7 @@ public ImmutableOpenMap> findAliases(final String[] if (!filteredValues.isEmpty()) { // Make the list order deterministic - CollectionUtil.timSort(filteredValues, new Comparator() { - @Override - public int compare(AliasMetaData o1, AliasMetaData o2) { - return o1.alias().compareTo(o2.alias()); - } - }); + CollectionUtil.timSort(filteredValues, Comparator.comparing(AliasMetaData::alias)); } mapBuilder.put(index, Collections.unmodifiableList(filteredValues)); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java 
b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 28a7570ca5582..344c424a62484 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -444,7 +444,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { // now add the mappings MapperService mapperService = indexService.mapperService(); try { - mapperService.merge(mappings, MergeReason.MAPPING_UPDATE, request.updateAllTypes()); + mapperService.merge(mappings, MergeReason.MAPPING_UPDATE); } catch (Exception e) { removalExtraInfo = "failed on parsing default mapping/mappings on index creation"; throw e; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java index 0949032db1665..37831f977aec7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java @@ -144,7 +144,7 @@ ClusterState innerExecute(ClusterState currentState, Iterable actio } catch (IOException e) { throw new ElasticsearchException("Failed to create temporary index for parsing the alias", e); } - indexService.mapperService().merge(index, MapperService.MergeReason.MAPPING_RECOVERY, false); + indexService.mapperService().merge(index, MapperService.MergeReason.MAPPING_RECOVERY); } indices.put(action.getIndex(), indexService); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java index 9d8da37cbeeba..de065a4b922f6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java @@ -250,7 +250,7 @@ private static void validateAndAddTemplate(final PutRequest request, IndexTempla mappingsForValidation.put(entry.getKey(), MapperService.parseMapping(xContentRegistry, entry.getValue())); } - dummyIndexService.mapperService().merge(mappingsForValidation, MergeReason.MAPPING_UPDATE, false); + dummyIndexService.mapperService().merge(mappingsForValidation, MergeReason.MAPPING_UPDATE); } finally { if (createdIndex != null) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java index e17b9fbb4d56a..a9301056f5ae0 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java @@ -187,7 +187,7 @@ public Set> entrySet() { try (IndexAnalyzers fakeIndexAnalzyers = new IndexAnalyzers(indexSettings, fakeDefault, fakeDefault, fakeDefault, analyzerMap, analyzerMap)) { MapperService mapperService = new MapperService(indexSettings, fakeIndexAnalzyers, xContentRegistry, similarityService, mapperRegistry, () -> null); - mapperService.merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY, false); + mapperService.merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY); } } catch (Exception ex) { // Wrap the inner exception so we have the index name in the exception message 
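The hunks above all make the same mechanical change: MapperService.merge loses its trailing updateAllTypes boolean and keeps only the MergeReason. A minimal sketch of the new call shape, assuming the 6.x MapperService API shown in these hunks (the class and method names below are hypothetical, for illustration only):

import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.MapperService.MergeReason;

final class MergeCallSiteSketch {
    // Hypothetical call site: recover existing mappings into a MapperService.
    static void recoverMappings(MapperService mapperService, IndexMetaData indexMetaData) {
        // Before this patch: mapperService.merge(indexMetaData, MergeReason.MAPPING_RECOVERY, false);
        // After this patch the boolean flag is gone:
        mapperService.merge(indexMetaData, MergeReason.MAPPING_RECOVERY);
    }
}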
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java index 12a56f00bd4f0..a116bc369b5e4 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java @@ -147,7 +147,7 @@ ClusterState executeRefresh(final ClusterState currentState, final List execute(ClusterSt MapperService mapperService = indicesService.createIndexMapperService(indexMetaData); indexMapperServices.put(index, mapperService); // add mappings for all types, we need them for cross-type validation - mapperService.merge(indexMetaData, MergeReason.MAPPING_RECOVERY, request.updateAllTypes()); + mapperService.merge(indexMetaData, MergeReason.MAPPING_RECOVERY); } } currentState = applyRequest(currentState, request, indexMapperServices); @@ -264,7 +264,7 @@ private ClusterState applyRequest(ClusterState currentState, PutMappingClusterSt newMapper = mapperService.parse(request.type(), mappingUpdateSource, existingMapper == null); if (existingMapper != null) { // first, simulate: just call merge and ignore the result - existingMapper.merge(newMapper.mapping(), request.updateAllTypes()); + existingMapper.merge(newMapper.mapping()); } else { // TODO: can we find a better place for this validation? // The reason this validation is here is that the mapper service doesn't learn about @@ -310,7 +310,7 @@ private ClusterState applyRequest(ClusterState currentState, PutMappingClusterSt if (existingMapper != null) { existingSource = existingMapper.mappingSource(); } - DocumentMapper mergedMapper = mapperService.merge(mappingType, mappingUpdateSource, MergeReason.MAPPING_UPDATE, request.updateAllTypes()); + DocumentMapper mergedMapper = mapperService.merge(mappingType, mappingUpdateSource, MergeReason.MAPPING_UPDATE); CompressedXContent updatedSource = mergedMapper.mappingSource(); if (existingSource != null) { diff --git a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java index 4373069a5f77c..057d37d5999a2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java +++ b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java @@ -39,6 +39,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Objects; /** * This class holds all {@link DiscoveryNode} in the cluster and provides convenience methods to @@ -205,12 +206,14 @@ public DiscoveryNode getLocalNode() { } /** - * Get the master node - * - * @return master node + * Returns the master node, or {@code null} if there is no master node */ + @Nullable public DiscoveryNode getMasterNode() { - return nodes.get(masterNodeId); + if (masterNodeId != null) { + return nodes.get(masterNodeId); + } + return null; } /** @@ -385,27 +388,20 @@ public DiscoveryNodes newNode(DiscoveryNode node) { * Returns the changes comparing this nodes to the provided nodes. 
*/ public Delta delta(DiscoveryNodes other) { - List removed = new ArrayList<>(); - List added = new ArrayList<>(); + final List removed = new ArrayList<>(); + final List added = new ArrayList<>(); for (DiscoveryNode node : other) { - if (!this.nodeExists(node)) { + if (this.nodeExists(node) == false) { removed.add(node); } } for (DiscoveryNode node : this) { - if (!other.nodeExists(node)) { + if (other.nodeExists(node) == false) { added.add(node); } } - DiscoveryNode previousMasterNode = null; - DiscoveryNode newMasterNode = null; - if (masterNodeId != null) { - if (other.masterNodeId == null || !other.masterNodeId.equals(masterNodeId)) { - previousMasterNode = other.getMasterNode(); - newMasterNode = getMasterNode(); - } - } - return new Delta(previousMasterNode, newMasterNode, localNodeId, Collections.unmodifiableList(removed), + + return new Delta(other.getMasterNode(), getMasterNode(), localNodeId, Collections.unmodifiableList(removed), Collections.unmodifiableList(added)); } @@ -429,8 +425,8 @@ public String toString() { public static class Delta { private final String localNodeId; - private final DiscoveryNode previousMasterNode; - private final DiscoveryNode newMasterNode; + @Nullable private final DiscoveryNode previousMasterNode; + @Nullable private final DiscoveryNode newMasterNode; private final List removed; private final List added; @@ -448,13 +444,15 @@ public boolean hasChanges() { } public boolean masterNodeChanged() { - return newMasterNode != null; + return Objects.equals(newMasterNode, previousMasterNode) == false; } + @Nullable public DiscoveryNode previousMasterNode() { return previousMasterNode; } + @Nullable public DiscoveryNode newMasterNode() { return newMasterNode; } @@ -476,51 +474,45 @@ public List addedNodes() { } public String shortSummary() { - StringBuilder sb = new StringBuilder(); - if (!removed() && masterNodeChanged()) { - if (newMasterNode.getId().equals(localNodeId)) { - // we are the master, no nodes we removed, we are actually the first master - sb.append("new_master ").append(newMasterNode()); - } else { - // we are not the master, so we just got this event. 
No nodes were removed, so its not a *new* master - sb.append("detected_master ").append(newMasterNode()); + final StringBuilder summary = new StringBuilder(); + if (masterNodeChanged()) { + summary.append("master node changed {previous ["); + if (previousMasterNode() != null) { + summary.append(previousMasterNode()); } - } else { - if (masterNodeChanged()) { - sb.append("master {new ").append(newMasterNode()); - if (previousMasterNode() != null) { - sb.append(", previous ").append(previousMasterNode()); - } - sb.append("}"); + summary.append("], current ["); + if (newMasterNode() != null) { + summary.append(newMasterNode()); } - if (removed()) { - if (masterNodeChanged()) { - sb.append(", "); - } - sb.append("removed {"); - for (DiscoveryNode node : removedNodes()) { - sb.append(node).append(','); - } - sb.append("}"); + summary.append("]}"); + } + if (removed()) { + if (summary.length() > 0) { + summary.append(", "); + } + summary.append("removed {"); + for (DiscoveryNode node : removedNodes()) { + summary.append(node).append(','); } + summary.append("}"); } if (added()) { // don't print if there is one added, and it is us if (!(addedNodes().size() == 1 && addedNodes().get(0).getId().equals(localNodeId))) { - if (removed() || masterNodeChanged()) { - sb.append(", "); + if (summary.length() > 0) { + summary.append(", "); } - sb.append("added {"); + summary.append("added {"); for (DiscoveryNode node : addedNodes()) { if (!node.getId().equals(localNodeId)) { // don't print ourself - sb.append(node).append(','); + summary.append(node).append(','); } } - sb.append("}"); + summary.append("}"); } } - return sb.toString(); + return summary.toString(); } } diff --git a/server/src/main/java/org/elasticsearch/common/settings/Setting.java b/server/src/main/java/org/elasticsearch/common/settings/Setting.java index bc22dbb63ebd8..fd91a8a7601c6 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -597,7 +597,7 @@ AbstractScopedSettings.SettingUpdater> newAffixMapUpdater(Consume @Override public boolean hasChanged(Settings current, Settings previous) { - return Stream.concat(matchStream(current), matchStream(previous)).findAny().isPresent(); + return current.filter(k -> match(k)).equals(previous.filter(k -> match(k))) == false; } @Override @@ -612,7 +612,7 @@ public Map getValue(Settings current, Settings previous) { if (updater.hasChanged(current, previous)) { // only the ones that have changed otherwise we might get too many updates // the hasChanged above checks only if there are any changes - T value = updater.getValue(current, previous); + T value = updater.getValue(current, previous); if ((omitDefaults && value.equals(concreteSetting.getDefault(current))) == false) { result.put(namespace, value); } diff --git a/server/src/main/java/org/elasticsearch/common/util/CollectionUtils.java b/server/src/main/java/org/elasticsearch/common/util/CollectionUtils.java index 54a49f7e4f254..08d02cdea3172 100644 --- a/server/src/main/java/org/elasticsearch/common/util/CollectionUtils.java +++ b/server/src/main/java/org/elasticsearch/common/util/CollectionUtils.java @@ -19,16 +19,20 @@ package org.elasticsearch.common.util; +import java.nio.file.Path; import java.util.AbstractList; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Comparator; +import java.util.IdentityHashMap; import java.util.LinkedList; import java.util.List; +import 
java.util.Map; import java.util.Objects; import java.util.RandomAccess; +import java.util.Set; import com.carrotsearch.hppc.ObjectArrayList; import org.apache.lucene.util.BytesRef; @@ -221,6 +225,40 @@ public static int[] toArray(Collection ints) { return ints.stream().mapToInt(s -> s).toArray(); } + public static void ensureNoSelfReferences(Object value) { + Iterable it = convert(value); + if (it != null) { + ensureNoSelfReferences(it, value, Collections.newSetFromMap(new IdentityHashMap<>())); + } + } + + private static Iterable convert(Object value) { + if (value == null) { + return null; + } + if (value instanceof Map) { + return ((Map) value).values(); + } else if ((value instanceof Iterable) && (value instanceof Path == false)) { + return (Iterable) value; + } else if (value instanceof Object[]) { + return Arrays.asList((Object[]) value); + } else { + return null; + } + } + + private static void ensureNoSelfReferences(final Iterable value, Object originalReference, final Set ancestors) { + if (value != null) { + if (ancestors.add(originalReference) == false) { + throw new IllegalArgumentException("Iterable object is self-referencing itself"); + } + for (Object o : value) { + ensureNoSelfReferences(convert(o), o, ancestors); + } + ancestors.remove(originalReference); + } + } + private static class RotatedList extends AbstractList implements RandomAccess { private final List in; diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java index 070510e13ff69..9f7603c997ea8 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.text.Text; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.CollectionUtils; import org.joda.time.DateTimeZone; import org.joda.time.ReadableInstant; import org.joda.time.format.DateTimeFormatter; @@ -43,7 +44,6 @@ import java.util.Collections; import java.util.Date; import java.util.HashMap; -import java.util.IdentityHashMap; import java.util.Locale; import java.util.Map; import java.util.Objects; @@ -780,7 +780,6 @@ private XContentBuilder values(Object[] values, boolean ensureNoSelfReferences) if (values == null) { return nullValue(); } - return value(Arrays.asList(values), ensureNoSelfReferences); } @@ -865,7 +864,7 @@ private XContentBuilder map(Map values, boolean ensureNoSelfReference // checks that the map does not contain references to itself because // iterating over map entries will cause a stackoverflow error if (ensureNoSelfReferences) { - ensureNoSelfReferences(values); + CollectionUtils.ensureNoSelfReferences(values); } startObject(); @@ -894,9 +893,8 @@ private XContentBuilder value(Iterable values, boolean ensureNoSelfReferences // checks that the iterable does not contain references to itself because // iterating over entries will cause a stackoverflow error if (ensureNoSelfReferences) { - ensureNoSelfReferences(values); + CollectionUtils.ensureNoSelfReferences(values); } - startArray(); for (Object value : values) { // pass ensureNoSelfReferences=false as we already performed the check at a higher level @@ -1067,32 +1065,4 @@ static void ensureNotNull(Object value, String message) { throw new IllegalArgumentException(message); } } - - static void ensureNoSelfReferences(Object value) { -
ensureNoSelfReferences(value, Collections.newSetFromMap(new IdentityHashMap<>())); - } - - private static void ensureNoSelfReferences(final Object value, final Set ancestors) { - if (value != null) { - - Iterable it; - if (value instanceof Map) { - it = ((Map) value).values(); - } else if ((value instanceof Iterable) && (value instanceof Path == false)) { - it = (Iterable) value; - } else if (value instanceof Object[]) { - it = Arrays.asList((Object[]) value); - } else { - return; - } - - if (ancestors.add(value) == false) { - throw new IllegalArgumentException("Object has already been built and is self-referencing itself"); - } - for (Object o : it) { - ensureNoSelfReferences(o, ancestors); - } - ancestors.remove(value); - } - } } diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java index 2d5a1dda46460..0285dcf93c1ea 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexService.java +++ b/server/src/main/java/org/elasticsearch/index/IndexService.java @@ -700,7 +700,7 @@ private void maybeFSyncTranslogs() { try { Translog translog = shard.getTranslog(); if (translog.syncNeeded()) { - translog.sync(); + shard.sync(); } } catch (AlreadyClosedException ex) { // fine - continue; diff --git a/server/src/main/java/org/elasticsearch/index/VersionType.java b/server/src/main/java/org/elasticsearch/index/VersionType.java index c5094ea185db1..6a8214cb0b8ec 100644 --- a/server/src/main/java/org/elasticsearch/index/VersionType.java +++ b/server/src/main/java/org/elasticsearch/index/VersionType.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.lucene.uid.Versions; import java.io.IOException; +import java.util.Locale; public enum VersionType implements Writeable { INTERNAL((byte) 0) { @@ -350,6 +351,10 @@ public static VersionType fromString(String versionType, VersionType defaultVers return fromString(versionType); } + public static String toString(VersionType versionType) { + return versionType.name().toLowerCase(Locale.ROOT); + } + public static VersionType fromValue(byte value) { if (value == 0) { return INTERNAL; diff --git a/server/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java b/server/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java index bf9045c5d00e1..37e96cbb54a57 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java @@ -47,8 +47,8 @@ public SynonymTokenFilterFactory(IndexSettings indexSettings, Environment env, A if (settings.get("ignore_case") != null) { deprecationLogger.deprecated( - "This tokenize synonyms with whatever tokenizer and token filters appear before it in the chain. " + - "If you need ignore case with this filter, you should set lowercase filter before this"); + "The ignore_case option on the synonym_graph filter is deprecated. 
" + + "Instead, insert a lowercase filter in the filter chain before the synonym_graph filter."); } this.expand = settings.getAsBoolean("expand", true); diff --git a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java index e5d8cacf73657..48a3caf0ea32b 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java +++ b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java @@ -45,37 +45,72 @@ public final class CombinedDeletionPolicy extends IndexDeletionPolicy { private final TranslogDeletionPolicy translogDeletionPolicy; private final EngineConfig.OpenMode openMode; private final LongSupplier globalCheckpointSupplier; + private final IndexCommit startingCommit; private final ObjectIntHashMap snapshottedCommits; // Number of snapshots held against each commit point. - private IndexCommit safeCommit; // the most recent safe commit point - its max_seqno at most the persisted global checkpoint. - private IndexCommit lastCommit; // the most recent commit point + private volatile IndexCommit safeCommit; // the most recent safe commit point - its max_seqno at most the persisted global checkpoint. + private volatile IndexCommit lastCommit; // the most recent commit point CombinedDeletionPolicy(EngineConfig.OpenMode openMode, TranslogDeletionPolicy translogDeletionPolicy, - LongSupplier globalCheckpointSupplier) { + LongSupplier globalCheckpointSupplier, IndexCommit startingCommit) { this.openMode = openMode; this.translogDeletionPolicy = translogDeletionPolicy; this.globalCheckpointSupplier = globalCheckpointSupplier; + this.startingCommit = startingCommit; this.snapshottedCommits = new ObjectIntHashMap<>(); } @Override - public void onInit(List commits) throws IOException { + public synchronized void onInit(List commits) throws IOException { switch (openMode) { case CREATE_INDEX_AND_TRANSLOG: + assert startingCommit == null : "CREATE_INDEX_AND_TRANSLOG must not have starting commit; commit [" + startingCommit + "]"; break; case OPEN_INDEX_CREATE_TRANSLOG: - assert commits.isEmpty() == false : "index is opened, but we have no commits"; - // When an engine starts with OPEN_INDEX_CREATE_TRANSLOG, a new fresh index commit will be created immediately. - // We therefore can simply skip processing here as `onCommit` will be called right after with a new commit. - break; case OPEN_INDEX_AND_TRANSLOG: assert commits.isEmpty() == false : "index is opened, but we have no commits"; - onCommit(commits); + assert startingCommit != null && commits.contains(startingCommit) : "Starting commit not in the existing commit list; " + + "startingCommit [" + startingCommit + "], commit list [" + commits + "]"; + keepOnlyStartingCommitOnInit(commits); + // OPEN_INDEX_CREATE_TRANSLOG can open an index commit from other shard with a different translog history, + // We therefore should not use that index commit to update the translog deletion policy. + if (openMode == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) { + updateTranslogDeletionPolicy(); + } break; default: throw new IllegalArgumentException("unknown openMode [" + openMode + "]"); } } + /** + * Keeping existing unsafe commits when opening an engine can be problematic because these commits are not safe + * at the recovering time but they can suddenly become safe in the future. + * The following issues can happen if unsafe commits are kept oninit. + *

+ * 1. Replica can use unsafe commit in peer-recovery. This happens when a replica with a safe commit c1(max_seqno=1) + * and an unsafe commit c2(max_seqno=2) recovers from a primary with c1(max_seqno=1). If a new document(seqno=2) + * is added without flushing, the global checkpoint is advanced to 2; and the replica recovers again, it will use + * the unsafe commit c2(max_seqno=2 at most gcp=2) as the starting commit for sequenced-based recovery even the + * commit c2 contains a stale operation and the document(with seqno=2) will not be replicated to the replica. + *

+ * 2. Min translog gen for recovery can go backwards in peer-recovery. This happens when are replica with a safe commit + * c1(local_checkpoint=1, recovery_translog_gen=1) and an unsafe commit c2(local_checkpoint=2, recovery_translog_gen=2). + * The replica recovers from a primary, and keeps c2 as the last commit, then sets last_translog_gen to 2. Flushing a new + * commit on the replica will cause exception as the new last commit c3 will have recovery_translog_gen=1. The recovery + * translog generation of a commit is calculated based on the current local checkpoint. The local checkpoint of c3 is 1 + * while the local checkpoint of c2 is 2. + *

+ * 3. Commit without translog can be used in recovery. An old index, which was created before multiple commits were introduced + * (v6.2), may not have a safe commit. If that index has a snapshotted commit without translog and an unsafe commit, + * the policy can consider the snapshotted commit as a safe commit for recovery even though the commit does not have a translog. + */ + private void keepOnlyStartingCommitOnInit(List commits) { + commits.stream().filter(commit -> startingCommit.equals(commit) == false).forEach(IndexCommit::delete); + assert startingCommit.isDeleted() == false : "Starting commit must not be deleted"; + lastCommit = startingCommit; + safeCommit = startingCommit; + } + @Override public synchronized void onCommit(List commits) throws IOException { final int keptPosition = indexOfKeptCommits(commits, globalCheckpointSupplier.getAsLong()); @@ -179,6 +214,21 @@ private static int indexOfKeptCommits(List commits, long return 0; } + /** + * Checks if the deletion policy can release some index commits with the latest global checkpoint. + */ + boolean hasUnreferencedCommits() throws IOException { + final IndexCommit lastCommit = this.lastCommit; + if (safeCommit != lastCommit) { // Race condition can happen but harmless + if (lastCommit.getUserData().containsKey(SequenceNumbers.MAX_SEQ_NO)) { + final long maxSeqNoFromLastCommit = Long.parseLong(lastCommit.getUserData().get(SequenceNumbers.MAX_SEQ_NO)); + // We can clean up the current safe commit if the last commit is safe + return globalCheckpointSupplier.getAsLong() >= maxSeqNoFromLastCommit; + } + } + return false; + } + /** * A wrapper of an index commit that prevents it from being deleted. */ diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index b73bfb78f3cb9..7feaeb63ac36f 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -91,6 +91,7 @@ import java.util.concurrent.locks.ReentrantLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.BiFunction; +import java.util.stream.Stream; public abstract class Engine implements Closeable { @@ -549,6 +550,13 @@ public enum SearcherScope { /** returns the translog for this engine */ public abstract Translog getTranslog(); + /** + * Ensures that all locations in the given stream have been written to the underlying storage.
+ */ + public abstract boolean ensureTranslogSynced(Stream locations) throws IOException; + + public abstract void syncTranslog() throws IOException; + protected void ensureOpen() { if (isClosed.get()) { throw new AlreadyClosedException(shardId + " engine is closed", failedEngine.get()); diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 1b7b891efd6ff..97a6403ec3b23 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -31,7 +31,6 @@ import org.apache.lucene.index.MergePolicy; import org.apache.lucene.index.SegmentCommitInfo; import org.apache.lucene.index.SegmentInfos; -import org.apache.lucene.index.SnapshotDeletionPolicy; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.ReferenceManager; @@ -94,6 +93,7 @@ import java.util.concurrent.locks.ReentrantLock; import java.util.function.BiFunction; import java.util.function.LongSupplier; +import java.util.stream.Stream; public class InternalEngine extends Engine { @@ -185,7 +185,7 @@ public InternalEngine(EngineConfig engineConfig) { "Starting commit should be non-null; mode [" + openMode + "]; startingCommit [" + startingCommit + "]"; this.localCheckpointTracker = createLocalCheckpointTracker(localCheckpointTrackerSupplier, startingCommit); this.combinedDeletionPolicy = new CombinedDeletionPolicy(openMode, translogDeletionPolicy, - translog::getLastSyncedGlobalCheckpoint); + translog::getLastSyncedGlobalCheckpoint, startingCommit); writer = createWriter(openMode == EngineConfig.OpenMode.CREATE_INDEX_AND_TRANSLOG, startingCommit); updateMaxUnsafeAutoIdTimestampFromWriter(writer); assert engineConfig.getForceNewHistoryUUID() == false @@ -411,28 +411,44 @@ public void skipTranslogRecovery() { } private IndexCommit getStartingCommitPoint() throws IOException { - if (openMode == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) { - final long lastSyncedGlobalCheckpoint = translog.getLastSyncedGlobalCheckpoint(); - final long minRetainedTranslogGen = translog.getMinFileGeneration(); - final List existingCommits = DirectoryReader.listCommits(store.directory()); - // We may not have a safe commit if an index was create before v6.2; and if there is a snapshotted commit whose full translog - // files are not retained but max_seqno is at most the global checkpoint, we may mistakenly select it as a starting commit. - // To avoid this issue, we only select index commits whose translog files are fully retained. 
-        if (engineConfig.getIndexSettings().getIndexVersionCreated().before(Version.V_6_2_0)) {
-            final List<IndexCommit> recoverableCommits = new ArrayList<>();
-            for (IndexCommit commit : existingCommits) {
-                if (minRetainedTranslogGen <= Long.parseLong(commit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY))) {
-                    recoverableCommits.add(commit);
+        final IndexCommit startingIndexCommit;
+        final List<IndexCommit> existingCommits;
+        switch (openMode) {
+            case CREATE_INDEX_AND_TRANSLOG:
+                startingIndexCommit = null;
+                break;
+            case OPEN_INDEX_CREATE_TRANSLOG:
+                // Use the last commit
+                existingCommits = DirectoryReader.listCommits(store.directory());
+                startingIndexCommit = existingCommits.get(existingCommits.size() - 1);
+                break;
+            case OPEN_INDEX_AND_TRANSLOG:
+                // Use the safe commit
+                final long lastSyncedGlobalCheckpoint = translog.getLastSyncedGlobalCheckpoint();
+                final long minRetainedTranslogGen = translog.getMinFileGeneration();
+                existingCommits = DirectoryReader.listCommits(store.directory());
+                // We may not have a safe commit if an index was created before v6.2; and if there is a snapshotted commit whose
+                // translog files are not retained but whose max_seqno is at most the global checkpoint, we may mistakenly select
+                // it as a starting commit. To avoid this issue, we only select index commits whose translog files are fully retained.
+                if (engineConfig.getIndexSettings().getIndexVersionCreated().before(Version.V_6_2_0)) {
+                    final List<IndexCommit> recoverableCommits = new ArrayList<>();
+                    for (IndexCommit commit : existingCommits) {
+                        if (minRetainedTranslogGen <= Long.parseLong(commit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY))) {
+                            recoverableCommits.add(commit);
+                        }
+                    }
+                    assert recoverableCommits.isEmpty() == false : "No commit point with translog found; " +
+                        "commits [" + existingCommits + "], minRetainedTranslogGen [" + minRetainedTranslogGen + "]";
+                    startingIndexCommit = CombinedDeletionPolicy.findSafeCommitPoint(recoverableCommits, lastSyncedGlobalCheckpoint);
+                } else {
+                    // TODO: Assert that the starting commit is a safe commit once peer-recovery sets the global checkpoint.
+ startingIndexCommit = CombinedDeletionPolicy.findSafeCommitPoint(existingCommits, lastSyncedGlobalCheckpoint); } - assert recoverableCommits.isEmpty() == false : "No commit point with full translog found; " + - "commits [" + existingCommits + "], minRetainedTranslogGen [" + minRetainedTranslogGen + "]"; - return CombinedDeletionPolicy.findSafeCommitPoint(recoverableCommits, lastSyncedGlobalCheckpoint); - } else { - return CombinedDeletionPolicy.findSafeCommitPoint(existingCommits, lastSyncedGlobalCheckpoint); - } + break; + default: + throw new IllegalArgumentException("unknown mode: " + openMode); } - return null; + return startingIndexCommit; } private void recoverFromTranslogInternal() throws IOException { @@ -504,6 +520,27 @@ public Translog getTranslog() { return translog; } + @Override + public boolean ensureTranslogSynced(Stream locations) throws IOException { + final boolean synced = translog.ensureSynced(locations); + if (synced) { + revisitIndexDeletionPolicyOnTranslogSynced(); + } + return synced; + } + + @Override + public void syncTranslog() throws IOException { + translog.sync(); + revisitIndexDeletionPolicyOnTranslogSynced(); + } + + private void revisitIndexDeletionPolicyOnTranslogSynced() throws IOException { + if (combinedDeletionPolicy.hasUnreferencedCommits()) { + indexWriter.deleteUnusedFiles(); + } + } + @Override public String getHistoryUUID() { return historyUUID; @@ -557,9 +594,7 @@ private ExternalSearcherManager createSearcherManager(SearchFactory externalSear final DirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(indexWriter), shardId); internalSearcherManager = new SearcherManager(directoryReader, new RamAccountingSearcherFactory(engineConfig.getCircuitBreakerService())); - // The index commit from IndexWriterConfig is null if the engine is open with other modes - // rather than CREATE_INDEX_AND_TRANSLOG. In those cases lastCommittedSegmentInfos will be retrieved from the last commit. 
- lastCommittedSegmentInfos = store.readCommittedSegmentsInfo(indexWriter.getConfig().getIndexCommit()); + lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo(); ExternalSearcherManager externalSearcherManager = new ExternalSearcherManager(internalSearcherManager, externalSearcherFactory); success = true; diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java b/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java index 8194b888615e7..a9d8df1cb264f 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java @@ -48,7 +48,7 @@ public class IndexFieldDataService extends AbstractIndexComponent implements Clo case "none": return s; default: - throw new IllegalArgumentException("failed to parse [" + s + "] must be one of [node,node]"); + throw new IllegalArgumentException("failed to parse [" + s + "] must be one of [node,none]"); } }, Property.IndexScope); diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/ShardFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/ShardFieldData.java index 6dd9552b6903b..ed9dd14328dc5 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/ShardFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/ShardFieldData.java @@ -32,9 +32,9 @@ public class ShardFieldData implements IndexFieldDataCache.Listener { - final CounterMetric evictionsMetric = new CounterMetric(); - final CounterMetric totalMetric = new CounterMetric(); - final ConcurrentMap perFieldTotals = ConcurrentCollections.newConcurrentMap(); + private final CounterMetric evictionsMetric = new CounterMetric(); + private final CounterMetric totalMetric = new CounterMetric(); + private final ConcurrentMap perFieldTotals = ConcurrentCollections.newConcurrentMap(); public FieldDataStats stats(String... 
fields) { ObjectLongHashMap fieldTotals = null; diff --git a/server/src/main/java/org/elasticsearch/index/get/GetResult.java b/server/src/main/java/org/elasticsearch/index/get/GetResult.java index 75e283b4191b1..4cdf2a4892690 100644 --- a/server/src/main/java/org/elasticsearch/index/get/GetResult.java +++ b/server/src/main/java/org/elasticsearch/index/get/GetResult.java @@ -269,14 +269,19 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public static GetResult fromXContentEmbedded(XContentParser parser) throws IOException { XContentParser.Token token = parser.nextToken(); ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation); + return fromXContentEmbedded(parser, null, null, null); + } + + public static GetResult fromXContentEmbedded(XContentParser parser, String index, String type, String id) throws IOException { + XContentParser.Token token = parser.currentToken(); + ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation); String currentFieldName = parser.currentName(); - String index = null, type = null, id = null; long version = -1; Boolean found = null; BytesReference source = null; Map fields = new HashMap<>(); - while((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token.isValue()) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java index 186334c85cb33..0c03e8a551f6f 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java @@ -324,8 +324,8 @@ public String typeName() { } @Override - public void checkCompatibility(MappedFieldType fieldType, List conflicts, boolean strict) { - super.checkCompatibility(fieldType, conflicts, strict); + public void checkCompatibility(MappedFieldType fieldType, List conflicts) { + super.checkCompatibility(fieldType, conflicts); CompletionFieldType other = (CompletionFieldType)fieldType; if (preservePositionIncrements != other.preservePositionIncrements) { @@ -607,8 +607,8 @@ protected String contentType() { } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { - super.doMerge(mergeWith, updateAllTypes); + protected void doMerge(Mapper mergeWith) { + super.doMerge(mergeWith); CompletionFieldMapper fieldMergeWith = (CompletionFieldMapper) mergeWith; this.maxInputLength = fieldMergeWith.maxInputLength; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java index 3b21a3bd7400b..00e09112deed2 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java @@ -219,8 +219,8 @@ public String typeName() { } @Override - public void checkCompatibility(MappedFieldType fieldType, List conflicts, boolean strict) { - super.checkCompatibility(fieldType, conflicts, strict); + public void checkCompatibility(MappedFieldType fieldType, List conflicts) { + super.checkCompatibility(fieldType, conflicts); DateFieldType other = (DateFieldType) fieldType; if (Objects.equals(dateTimeFormatter().format(), 
other.dateTimeFormatter().format()) == false) { conflicts.add("mapper [" + name() + "] has different [format] values"); @@ -472,8 +472,8 @@ protected void parseCreateField(ParseContext context, List field } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { - super.doMerge(mergeWith, updateAllTypes); + protected void doMerge(Mapper mergeWith) { + super.doMerge(mergeWith); final DateFieldMapper other = (DateFieldMapper) mergeWith; if (other.ignoreMalformed.explicit()) { this.ignoreMalformed = other.ignoreMalformed; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index e1e33739ac4b5..42f842e612803 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -296,8 +296,8 @@ public boolean isParent(String type) { return mapperService.getParentTypes().contains(type); } - public DocumentMapper merge(Mapping mapping, boolean updateAllTypes) { - Mapping merged = this.mapping.merge(mapping, updateAllTypes); + public DocumentMapper merge(Mapping mapping) { + Mapping merged = this.mapping.merge(mapping); return new DocumentMapper(mapperService, merged); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index 596581a15a22c..aa286b883468f 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -218,7 +218,7 @@ static Mapping createDynamicUpdate(Mapping mapping, DocumentMapper docMapper, Li // We can see the same mapper more than once, for example, if we had foo.bar and foo.baz, where // foo did not yet exist. This will create 2 copies in dynamic mappings, which should be identical. // Here we just skip over the duplicates, but we merge them to ensure there are no conflicts. - newMapper.merge(previousMapper, false); + newMapper.merge(previousMapper); continue; } previousMapper = newMapper; @@ -275,7 +275,7 @@ private static void addToLastMapper(List parentMappers, Mapper map int lastIndex = parentMappers.size() - 1; ObjectMapper withNewMapper = parentMappers.get(lastIndex).mappingUpdate(mapper); if (merge) { - withNewMapper = parentMappers.get(lastIndex).merge(withNewMapper, false); + withNewMapper = parentMappers.get(lastIndex).merge(withNewMapper); } parentMappers.set(lastIndex, withNewMapper); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java index c6e0dd9c00b72..f23a8d0ce96aa 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java @@ -312,17 +312,16 @@ protected FieldMapper clone() { } @Override - public FieldMapper merge(Mapper mergeWith, boolean updateAllTypes) { + public FieldMapper merge(Mapper mergeWith) { FieldMapper merged = clone(); - merged.doMerge(mergeWith, updateAllTypes); + merged.doMerge(mergeWith); return merged; } /** * Merge changes coming from {@code mergeWith} in place. 
- * @param updateAllTypes TODO */ - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + protected void doMerge(Mapper mergeWith) { if (!this.getClass().equals(mergeWith.getClass())) { String mergedType = mergeWith.getClass().getSimpleName(); if (mergeWith instanceof FieldMapper) { @@ -553,7 +552,7 @@ public MultiFields merge(MultiFields mergeWith) { if (mergeIntoMapper == null) { newMappersBuilder.put(mergeWithMapper.simpleName(), mergeWithMapper); } else { - FieldMapper merged = mergeIntoMapper.merge(mergeWithMapper, false); + FieldMapper merged = mergeIntoMapper.merge(mergeWithMapper); newMappersBuilder.put(merged.simpleName(), merged); // override previous definition } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java index 8482a94cfc74c..ada640f873975 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java @@ -165,17 +165,6 @@ public String typeName() { return CONTENT_TYPE; } - @Override - public void checkCompatibility(MappedFieldType fieldType, List conflicts, boolean strict) { - super.checkCompatibility(fieldType, conflicts, strict); - if (strict) { - FieldNamesFieldType other = (FieldNamesFieldType)fieldType; - if (isEnabled() != other.isEnabled()) { - conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [enabled] across all types."); - } - } - } - public void setEnabled(boolean enabled) { checkIfFrozen(); this.enabled = enabled; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java index fee41e43f2a3c..069468ddb7a25 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java @@ -24,7 +24,6 @@ import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -39,37 +38,13 @@ class FieldTypeLookup implements Iterable { /** Full field name to field type */ final CopyOnWriteHashMap fullNameToFieldType; - /** Full field name to types containing a mapping for this full name. */ - final CopyOnWriteHashMap> fullNameToTypes; - /** Create a new empty instance. */ FieldTypeLookup() { fullNameToFieldType = new CopyOnWriteHashMap<>(); - fullNameToTypes = new CopyOnWriteHashMap<>(); } - private FieldTypeLookup( - CopyOnWriteHashMap fullName, - CopyOnWriteHashMap> fullNameToTypes) { + private FieldTypeLookup(CopyOnWriteHashMap fullName) { this.fullNameToFieldType = fullName; - this.fullNameToTypes = fullNameToTypes; - } - - private static CopyOnWriteHashMap> addType(CopyOnWriteHashMap> map, String key, String type) { - Set types = map.get(key); - if (types == null) { - return map.copyAndPut(key, Collections.singleton(type)); - } else if (types.contains(type)) { - // noting to do - return map; - } else { - Set newTypes = new HashSet<>(types.size() + 1); - newTypes.addAll(types); - newTypes.add(type); - assert newTypes.size() == types.size() + 1; - newTypes = Collections.unmodifiableSet(newTypes); - return map.copyAndPut(key, newTypes); - } } /** @@ -77,58 +52,41 @@ private static CopyOnWriteHashMap> addType(CopyOnWriteHashMa * from the provided fields. 
If a field already exists, the field type will be updated * to use the new mappers field type. */ - public FieldTypeLookup copyAndAddAll(String type, Collection fieldMappers, boolean updateAllTypes) { + public FieldTypeLookup copyAndAddAll(String type, Collection fieldMappers) { Objects.requireNonNull(type, "type must not be null"); if (MapperService.DEFAULT_MAPPING.equals(type)) { throw new IllegalArgumentException("Default mappings should not be added to the lookup"); } CopyOnWriteHashMap fullName = this.fullNameToFieldType; - CopyOnWriteHashMap> fullNameToTypes = this.fullNameToTypes; for (FieldMapper fieldMapper : fieldMappers) { MappedFieldType fieldType = fieldMapper.fieldType(); MappedFieldType fullNameFieldType = fullName.get(fieldType.name()); - // is the update even legal? - checkCompatibility(type, fieldMapper, updateAllTypes); - - if (fieldType.equals(fullNameFieldType) == false) { + if (fullNameFieldType == null) { + // introduction of a new field fullName = fullName.copyAndPut(fieldType.name(), fieldMapper.fieldType()); + } else { + // modification of an existing field + checkCompatibility(fullNameFieldType, fieldType); + if (fieldType.equals(fullNameFieldType) == false) { + fullName = fullName.copyAndPut(fieldType.name(), fieldMapper.fieldType()); + } } - - fullNameToTypes = addType(fullNameToTypes, fieldType.name(), type); - } - return new FieldTypeLookup(fullName, fullNameToTypes); - } - - private static boolean beStrict(String type, Set types, boolean updateAllTypes) { - assert types.size() >= 1; - if (updateAllTypes) { - return false; - } else if (types.size() == 1 && types.contains(type)) { - // we are implicitly updating all types - return false; - } else { - return true; } + return new FieldTypeLookup(fullName); } /** * Checks if the given field type is compatible with an existing field type. * An IllegalArgumentException is thrown in case of incompatibility. - * If updateAllTypes is true, only basic compatibility is checked. */ - private void checkCompatibility(String type, FieldMapper fieldMapper, boolean updateAllTypes) { - MappedFieldType fieldType = fullNameToFieldType.get(fieldMapper.fieldType().name()); - if (fieldType != null) { - List conflicts = new ArrayList<>(); - final Set types = fullNameToTypes.get(fieldMapper.fieldType().name()); - boolean strict = beStrict(type, types, updateAllTypes); - fieldType.checkCompatibility(fieldMapper.fieldType(), conflicts, strict); - if (conflicts.isEmpty() == false) { - throw new IllegalArgumentException("Mapper for [" + fieldMapper.fieldType().name() + "] conflicts with existing mapping in other types:\n" + conflicts.toString()); - } + private void checkCompatibility(MappedFieldType existingFieldType, MappedFieldType newFieldType) { + List conflicts = new ArrayList<>(); + existingFieldType.checkCompatibility(newFieldType, conflicts); + if (conflicts.isEmpty() == false) { + throw new IllegalArgumentException("Mapper for [" + newFieldType.name() + "] conflicts with existing mapping:\n" + conflicts.toString()); } } @@ -137,15 +95,6 @@ public MappedFieldType get(String field) { return fullNameToFieldType.get(field); } - /** Get the set of types that have a mapping for the given field. */ - public Set getTypes(String field) { - Set types = fullNameToTypes.get(field); - if (types == null) { - types = Collections.emptySet(); - } - return types; - } - /** * Returns a list of the full names of a simple match regex like pattern against full name and index name. 
*/ diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java index 45237eb572d2c..7b9eb5f067a67 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java @@ -142,8 +142,8 @@ public GeoPointFieldMapper(String simpleName, MappedFieldType fieldType, MappedF } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { - super.doMerge(mergeWith, updateAllTypes); + protected void doMerge(Mapper mergeWith) { + super.doMerge(mergeWith); GeoPointFieldMapper gpfmMergeWith = (GeoPointFieldMapper) mergeWith; if (gpfmMergeWith.ignoreMalformed.explicit()) { this.ignoreMalformed = gpfmMergeWith.ignoreMalformed; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java index 68d6ac66678e7..9e2a17817acde 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java @@ -309,8 +309,8 @@ public void freeze() { } @Override - public void checkCompatibility(MappedFieldType fieldType, List conflicts, boolean strict) { - super.checkCompatibility(fieldType, conflicts, strict); + public void checkCompatibility(MappedFieldType fieldType, List conflicts) { + super.checkCompatibility(fieldType, conflicts); GeoShapeFieldType other = (GeoShapeFieldType)fieldType; // prevent user from changing strategies if (strategyName().equals(other.strategyName()) == false) { @@ -334,15 +334,6 @@ public void checkCompatibility(MappedFieldType fieldType, List conflicts if (precisionInMeters() != other.precisionInMeters()) { conflicts.add("mapper [" + name() + "] has different [precision]"); } - - if (strict) { - if (orientation() != other.orientation()) { - conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [orientation] across all types."); - } - if (distanceErrorPct() != other.distanceErrorPct()) { - conflicts.add("mapper [" + name() + "] is used by multiple types. 
Set update_all_types to true to update [distance_error_pct] across all types."); - } - } } private static int getLevels(int treeLevels, double precisionInMeters, int defaultLevels, boolean geoHash) { @@ -511,8 +502,8 @@ protected void parseCreateField(ParseContext context, List field } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { - super.doMerge(mergeWith, updateAllTypes); + protected void doMerge(Mapper mergeWith) { + super.doMerge(mergeWith); GeoShapeFieldMapper gsfm = (GeoShapeFieldMapper)mergeWith; if (gsfm.coerce.explicit()) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java index 41256d3a5bb58..e60b27fce7239 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java @@ -314,7 +314,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + protected void doMerge(Mapper mergeWith) { // do nothing here, no merging, but also no exception } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java index 1bdb125b4e7cb..8e92ecc8bf686 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java @@ -189,7 +189,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + protected void doMerge(Mapper mergeWith) { // nothing to do } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java index bc811d041e313..c10c2339b895e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java @@ -390,8 +390,8 @@ protected void parseCreateField(ParseContext context, List field } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { - super.doMerge(mergeWith, updateAllTypes); + protected void doMerge(Mapper mergeWith) { + super.doMerge(mergeWith); IpFieldMapper other = (IpFieldMapper) mergeWith; if (other.ignoreMalformed.explicit()) { this.ignoreMalformed = other.ignoreMalformed; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java index cb2c4b6b6fddf..76163929e68ae 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java @@ -187,8 +187,8 @@ public boolean equals(Object o) { } @Override - public void checkCompatibility(MappedFieldType otherFT, List conflicts, boolean strict) { - super.checkCompatibility(otherFT, conflicts, strict); + public void checkCompatibility(MappedFieldType otherFT, List conflicts) { + super.checkCompatibility(otherFT, conflicts); KeywordFieldType other = (KeywordFieldType) otherFT; if (Objects.equals(normalizer, other.normalizer) == false) { conflicts.add("mapper [" + name() + "] has different [normalizer]"); @@ -352,8 +352,8 @@ protected String contentType() { } @Override - 
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { - super.doMerge(mergeWith, updateAllTypes); + protected void doMerge(Mapper mergeWith) { + super.doMerge(mergeWith); this.ignoreAbove = ((KeywordFieldMapper) mergeWith).ignoreAbove; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java index 6eab90875345b..69189ab129762 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java @@ -157,7 +157,7 @@ private void checkTypeName(MappedFieldType other) { * If strict is true, all properties must be equal. * Otherwise, only properties which must never change in an index are checked. */ - public void checkCompatibility(MappedFieldType other, List conflicts, boolean strict) { + public void checkCompatibility(MappedFieldType other, List conflicts) { checkTypeName(other); boolean indexed = indexOptions() != IndexOptions.NONE; @@ -202,27 +202,6 @@ public void checkCompatibility(MappedFieldType other, List conflicts, bo if (Objects.equals(similarity(), other.similarity()) == false) { conflicts.add("mapper [" + name() + "] has different [similarity]"); } - - if (strict) { - if (omitNorms() != other.omitNorms()) { - conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [omit_norms] across all types."); - } - if (boost() != other.boost()) { - conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [boost] across all types."); - } - if (Objects.equals(searchAnalyzer(), other.searchAnalyzer()) == false) { - conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [search_analyzer] across all types."); - } - if (Objects.equals(searchQuoteAnalyzer(), other.searchQuoteAnalyzer()) == false) { - conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [search_quote_analyzer] across all types."); - } - if (Objects.equals(nullValue(), other.nullValue()) == false) { - conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [null_value] across all types."); - } - if (eagerGlobalOrdinals() != other.eagerGlobalOrdinals()) { - conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [eager_global_ordinals] across all types."); - } - } } public String name() { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java index 4f78ec0ad9561..051ac9da7f2ec 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java @@ -175,7 +175,7 @@ public final String simpleName() { /** Return the merge of {@code mergeWith} into this. * Both {@code this} and {@code mergeWith} will be left unmodified. */ - public abstract Mapper merge(Mapper mergeWith, boolean updateAllTypes); + public abstract Mapper merge(Mapper mergeWith); /** * Update the field type of this mapper. 
This is necessary because some mapping updates diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 51ebe9d980b7a..a04673eca4c38 100755 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -215,7 +215,7 @@ public boolean updateMapping(IndexMetaData indexMetaData) throws IOException { final Map updatedEntries; try { // only update entries if needed - updatedEntries = internalMerge(indexMetaData, MergeReason.MAPPING_RECOVERY, true, true); + updatedEntries = internalMerge(indexMetaData, MergeReason.MAPPING_RECOVERY, true); } catch (Exception e) { logger.warn((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage("[{}] failed to apply mappings", index()), e); throw e; @@ -250,7 +250,7 @@ public boolean updateMapping(IndexMetaData indexMetaData) throws IOException { return requireRefresh; } - public void merge(Map> mappings, MergeReason reason, boolean updateAllTypes) { + public void merge(Map> mappings, MergeReason reason) { Map mappingSourcesCompressed = new LinkedHashMap<>(mappings.size()); for (Map.Entry> entry : mappings.entrySet()) { try { @@ -260,19 +260,18 @@ public void merge(Map> mappings, MergeReason reason, } } - internalMerge(mappingSourcesCompressed, reason, updateAllTypes); + internalMerge(mappingSourcesCompressed, reason); } - public void merge(IndexMetaData indexMetaData, MergeReason reason, boolean updateAllTypes) { - internalMerge(indexMetaData, reason, updateAllTypes, false); + public void merge(IndexMetaData indexMetaData, MergeReason reason) { + internalMerge(indexMetaData, reason, false); } - public DocumentMapper merge(String type, CompressedXContent mappingSource, MergeReason reason, boolean updateAllTypes) { - return internalMerge(Collections.singletonMap(type, mappingSource), reason, updateAllTypes).get(type); + public DocumentMapper merge(String type, CompressedXContent mappingSource, MergeReason reason) { + return internalMerge(Collections.singletonMap(type, mappingSource), reason).get(type); } - private synchronized Map internalMerge(IndexMetaData indexMetaData, MergeReason reason, boolean updateAllTypes, - boolean onlyUpdateIfNeeded) { + private synchronized Map internalMerge(IndexMetaData indexMetaData, MergeReason reason, boolean onlyUpdateIfNeeded) { Map map = new LinkedHashMap<>(); for (ObjectCursor cursor : indexMetaData.getMappings().values()) { MappingMetaData mappingMetaData = cursor.value; @@ -285,10 +284,10 @@ private synchronized Map internalMerge(IndexMetaData ind map.put(mappingMetaData.type(), mappingMetaData.source()); } } - return internalMerge(map, reason, updateAllTypes); + return internalMerge(map, reason); } - private synchronized Map internalMerge(Map mappings, MergeReason reason, boolean updateAllTypes) { + private synchronized Map internalMerge(Map mappings, MergeReason reason) { DocumentMapper defaultMapper = null; String defaultMappingSource = null; @@ -336,7 +335,7 @@ private synchronized Map internalMerge(Map internalMerge(@Nullable DocumentMapper defaultMapper, @Nullable String defaultMappingSource, - List documentMappers, MergeReason reason, boolean updateAllTypes) { + List documentMappers, MergeReason reason) { boolean hasNested = this.hasNested; Map fullPathObjectMappers = this.fullPathObjectMappers; FieldTypeLookup fieldTypes = this.fieldTypes; @@ -392,7 +391,7 @@ private synchronized Map 
internalMerge(@Nullable Documen DocumentMapper oldMapper = mappers.get(mapper.type()); DocumentMapper newMapper; if (oldMapper != null) { - newMapper = oldMapper.merge(mapper.mapping(), updateAllTypes); + newMapper = oldMapper.merge(mapper.mapping()); } else { newMapper = mapper; } @@ -403,12 +402,12 @@ private synchronized Map internalMerge(@Nullable Documen Collections.addAll(fieldMappers, newMapper.mapping().metadataMappers); MapperUtils.collect(newMapper.mapping().root(), objectMappers, fieldMappers); checkFieldUniqueness(newMapper.type(), objectMappers, fieldMappers, fullPathObjectMappers, fieldTypes); - checkObjectsCompatibility(objectMappers, updateAllTypes, fullPathObjectMappers); + checkObjectsCompatibility(objectMappers, fullPathObjectMappers); checkPartitionedIndexConstraints(newMapper); // update lookup data-structures // this will in particular make sure that the merged fields are compatible with other types - fieldTypes = fieldTypes.copyAndAddAll(newMapper.type(), fieldMappers, updateAllTypes); + fieldTypes = fieldTypes.copyAndAddAll(newMapper.type(), fieldMappers); for (ObjectMapper objectMapper : objectMappers) { if (fullPathObjectMappers == this.fullPathObjectMappers) { @@ -575,14 +574,14 @@ private static void checkFieldUniqueness(String type, Collection o } } - private static void checkObjectsCompatibility(Collection objectMappers, boolean updateAllTypes, + private static void checkObjectsCompatibility(Collection objectMappers, Map fullPathObjectMappers) { for (ObjectMapper newObjectMapper : objectMappers) { ObjectMapper existingObjectMapper = fullPathObjectMappers.get(newObjectMapper.fullPath()); if (existingObjectMapper != null) { // simulate a merge and ignore the result, we are just interested // in exceptions here - existingObjectMapper.merge(newObjectMapper, updateAllTypes); + existingObjectMapper.merge(newObjectMapper); } } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/Mapping.java b/server/src/main/java/org/elasticsearch/index/mapper/Mapping.java index 8a90de4d47aa5..bd92cf6d00970 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/Mapping.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/Mapping.java @@ -84,9 +84,9 @@ public T metadataMapper(Class clazz) { return (T) metadataMappersMap.get(clazz); } - /** @see DocumentMapper#merge(Mapping, boolean) */ - public Mapping merge(Mapping mergeWith, boolean updateAllTypes) { - RootObjectMapper mergedRoot = root.merge(mergeWith.root, updateAllTypes); + /** @see DocumentMapper#merge(Mapping) */ + public Mapping merge(Mapping mergeWith) { + RootObjectMapper mergedRoot = root.merge(mergeWith.root); Map, MetadataFieldMapper> mergedMetaDataMappers = new HashMap<>(metadataMappersMap); for (MetadataFieldMapper metaMergeWith : mergeWith.metadataMappers) { MetadataFieldMapper mergeInto = mergedMetaDataMappers.get(metaMergeWith.getClass()); @@ -94,7 +94,7 @@ public Mapping merge(Mapping mergeWith, boolean updateAllTypes) { if (mergeInto == null) { merged = metaMergeWith; } else { - merged = mergeInto.merge(metaMergeWith, updateAllTypes); + merged = mergeInto.merge(metaMergeWith); } mergedMetaDataMappers.put(merged.getClass(), merged); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java index 0833e8f33f30f..1240250a74743 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java +++ 
b/server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java @@ -37,8 +37,6 @@ public interface TypeParser extends Mapper.TypeParser { /** * Get the default {@link MetadataFieldMapper} to use, if nothing had to be parsed. - * @param fieldType null if this is the first root mapper on this index, the existing - * fieldType for this index otherwise * @param fieldType the existing field type for this meta mapper on the current index * or null if this is the first type being introduced * @param parserContext context that may be useful to build the field like analyzers @@ -69,7 +67,7 @@ protected MetadataFieldMapper(String simpleName, MappedFieldType fieldType, Mapp public abstract void postParse(ParseContext context) throws IOException; @Override - public MetadataFieldMapper merge(Mapper mergeWith, boolean updateAllTypes) { - return (MetadataFieldMapper) super.merge(mergeWith, updateAllTypes); + public MetadataFieldMapper merge(Mapper mergeWith) { + return (MetadataFieldMapper) super.merge(mergeWith); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index a44611d6406a1..92cb44cfd147f 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -1019,8 +1019,8 @@ protected void parseCreateField(ParseContext context, List field } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { - super.doMerge(mergeWith, updateAllTypes); + protected void doMerge(Mapper mergeWith) { + super.doMerge(mergeWith); NumberFieldMapper other = (NumberFieldMapper) mergeWith; if (other.ignoreMalformed.explicit()) { this.ignoreMalformed = other.ignoreMalformed; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java index d83ce173d6896..c96d8bb384bb6 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java @@ -31,7 +31,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.fielddata.ScriptDocValues; import java.io.IOException; import java.util.ArrayList; @@ -139,7 +138,7 @@ public Y build(BuilderContext context) { Mapper mapper = builder.build(context); Mapper existing = mappers.get(mapper.simpleName()); if (existing != null) { - mapper = existing.merge(mapper, false); + mapper = existing.merge(mapper); } mappers.put(mapper.simpleName(), mapper); } @@ -426,17 +425,17 @@ public boolean parentObjectMapperAreNested(MapperService mapperService) { } @Override - public ObjectMapper merge(Mapper mergeWith, boolean updateAllTypes) { + public ObjectMapper merge(Mapper mergeWith) { if (!(mergeWith instanceof ObjectMapper)) { throw new IllegalArgumentException("Can't merge a non object mapping [" + mergeWith.name() + "] with an object mapping [" + name() + "]"); } ObjectMapper mergeWithObject = (ObjectMapper) mergeWith; ObjectMapper merged = clone(); - merged.doMerge(mergeWithObject, updateAllTypes); + merged.doMerge(mergeWithObject); return merged; } - protected void doMerge(final ObjectMapper mergeWith, boolean updateAllTypes) { + protected void doMerge(final ObjectMapper mergeWith) { if (nested().isNested()) { if 
(!mergeWith.nested().isNested()) { throw new IllegalArgumentException("object mapping [" + name() + "] can't be changed from nested to non-nested"); @@ -459,7 +458,7 @@ protected void doMerge(final ObjectMapper mergeWith, boolean updateAllTypes) { merged = mergeWithMapper; } else { // root mappers can only exist here for backcompat, and are merged in Mapping - merged = mergeIntoMapper.merge(mergeWithMapper, updateAllTypes); + merged = mergeIntoMapper.merge(mergeWithMapper); } putMapper(merged); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ParentFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ParentFieldMapper.java index 34eaf569ca949..1d3588ae5a745 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ParentFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ParentFieldMapper.java @@ -301,7 +301,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + protected void doMerge(Mapper mergeWith) { ParentFieldMapper fieldMergeWith = (ParentFieldMapper) mergeWith; if (fieldMergeWith.parentType != null && Objects.equals(parentType, fieldMergeWith.parentType) == false) { throw new IllegalArgumentException("The _parent field's type option can't be changed: [" + parentType + "]->[" + fieldMergeWith.parentType + "]"); @@ -310,7 +310,7 @@ protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { // update that does not explicitly configure the _parent field, so we // ignore it. if (fieldMergeWith.active()) { - super.doMerge(mergeWith, updateAllTypes); + super.doMerge(mergeWith); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java b/server/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java index 11804c2e88e1d..0c740a0af7c8b 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java @@ -131,7 +131,7 @@ public void addDynamicMappingsUpdate(Mapping update) { if (dynamicMappingsUpdate == null) { dynamicMappingsUpdate = update; } else { - dynamicMappingsUpdate = dynamicMappingsUpdate.merge(update, false); + dynamicMappingsUpdate = dynamicMappingsUpdate.merge(update); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java index 9a00ddebe83ba..1536db6510fc7 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java @@ -256,29 +256,6 @@ public String typeName() { return rangeType.name; } - @Override - public void checkCompatibility(MappedFieldType fieldType, List conflicts, boolean strict) { - super.checkCompatibility(fieldType, conflicts, strict); - if (strict) { - RangeFieldType other = (RangeFieldType)fieldType; - if (this.rangeType != other.rangeType) { - conflicts.add("mapper [" + name() - + "] is attempting to update from type [" + rangeType.name - + "] to incompatible type [" + other.rangeType.name + "]."); - } - if (this.rangeType == RangeType.DATE) { - if (Objects.equals(dateTimeFormatter().format(), other.dateTimeFormatter().format()) == false) { - conflicts.add("mapper [" + name() - + "] is used by multiple types. 
Set update_all_types to true to update [format] across all types."); - } - if (Objects.equals(dateTimeFormatter().locale(), other.dateTimeFormatter().locale()) == false) { - conflicts.add("mapper [" + name() - + "] is used by multiple types. Set update_all_types to true to update [locale] across all types."); - } - } - } - } - public FormatDateTimeFormatter dateTimeFormatter() { return dateTimeFormatter; } @@ -416,8 +393,8 @@ protected void parseCreateField(ParseContext context, List field } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { - super.doMerge(mergeWith, updateAllTypes); + protected void doMerge(Mapper mergeWith) { + super.doMerge(mergeWith); RangeFieldMapper other = (RangeFieldMapper) mergeWith; if (other.coerce.explicit()) { this.coerce = other.coerce; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java index 42341bfb96b2d..009caf2b8e814 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java @@ -268,13 +268,13 @@ public DynamicTemplate findTemplate(ContentPath path, String name, XContentField } @Override - public RootObjectMapper merge(Mapper mergeWith, boolean updateAllTypes) { - return (RootObjectMapper) super.merge(mergeWith, updateAllTypes); + public RootObjectMapper merge(Mapper mergeWith) { + return (RootObjectMapper) super.merge(mergeWith); } @Override - protected void doMerge(ObjectMapper mergeWith, boolean updateAllTypes) { - super.doMerge(mergeWith, updateAllTypes); + protected void doMerge(ObjectMapper mergeWith) { + super.doMerge(mergeWith); RootObjectMapper mergeWithObject = (RootObjectMapper) mergeWith; if (mergeWithObject.numericDetection.explicit()) { this.numericDetection = mergeWithObject.numericDetection; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java index a4b009f9f1fa3..25cfc71261b0a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java @@ -201,7 +201,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + protected void doMerge(Mapper mergeWith) { // do nothing here, no merging, but also no exception } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java index 01b302797e2ec..197d555736343 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java @@ -278,7 +278,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + protected void doMerge(Mapper mergeWith) { // nothing to do } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java index 47d5e64438e57..b4a8330e23803 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java +++ 
b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java @@ -291,7 +291,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + protected void doMerge(Mapper mergeWith) { SourceFieldMapper sourceMergeWith = (SourceFieldMapper) mergeWith; List conflicts = new ArrayList<>(); if (this.enabled != sourceMergeWith.enabled) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java index ae99f743fe57f..4d67ec4cfbc19 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java @@ -212,31 +212,6 @@ public int hashCode() { fielddataMinFrequency, fielddataMaxFrequency, fielddataMinSegmentSize); } - @Override - public void checkCompatibility(MappedFieldType other, - List conflicts, boolean strict) { - super.checkCompatibility(other, conflicts, strict); - TextFieldType otherType = (TextFieldType) other; - if (strict) { - if (fielddata() != otherType.fielddata()) { - conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [fielddata] " - + "across all types."); - } - if (fielddataMinFrequency() != otherType.fielddataMinFrequency()) { - conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update " - + "[fielddata_frequency_filter.min] across all types."); - } - if (fielddataMaxFrequency() != otherType.fielddataMaxFrequency()) { - conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update " - + "[fielddata_frequency_filter.max] across all types."); - } - if (fielddataMinSegmentSize() != otherType.fielddataMinSegmentSize()) { - conflicts.add("mapper [" + name() + "] is used by multiple types. 
Set update_all_types to true to update " - + "[fielddata_frequency_filter.min_segment_size] across all types."); - } - } - } - public boolean fielddata() { return fielddata; } @@ -357,8 +332,8 @@ protected String contentType() { } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { - super.doMerge(mergeWith, updateAllTypes); + protected void doMerge(Mapper mergeWith) { + super.doMerge(mergeWith); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java index 712e9edec9e27..b47242d02b0f3 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java @@ -316,7 +316,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + protected void doMerge(Mapper mergeWith) { // do nothing here, no merging, but also no exception } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/UidFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/UidFieldMapper.java index 95dc40bca637a..04e791b8cee1e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/UidFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/UidFieldMapper.java @@ -229,7 +229,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + protected void doMerge(Mapper mergeWith) { // do nothing here, no merging, but also no exception } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java index c5ead1327cc9b..bedb98e2126ac 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java @@ -145,7 +145,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + protected void doMerge(Mapper mergeWith) { // nothing to do } } diff --git a/server/src/main/java/org/elasticsearch/index/search/SimpleQueryStringQueryParser.java b/server/src/main/java/org/elasticsearch/index/search/SimpleQueryStringQueryParser.java index 9f91b16359287..aea3677e33e13 100644 --- a/server/src/main/java/org/elasticsearch/index/search/SimpleQueryStringQueryParser.java +++ b/server/src/main/java/org/elasticsearch/index/search/SimpleQueryStringQueryParser.java @@ -33,6 +33,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.SynonymQuery; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.AbstractQueryBuilder; @@ -86,11 +87,11 @@ private Analyzer getAnalyzer(MappedFieldType ft) { } /** - * Rethrow the runtime exception, unless the lenient flag has been set, returns null + * Rethrow the runtime exception, unless the lenient flag has been set, returns {@link MatchNoDocsQuery} */ private Query rethrowUnlessLenient(RuntimeException e) { if (settings.lenient()) { - return null; + return Queries.newMatchNoDocsQuery("failed query, caused by 
" + e.getMessage()); } throw e; } @@ -115,7 +116,7 @@ public Query newDefaultQuery(String text) { try { return queryBuilder.parse(MultiMatchQueryBuilder.Type.MOST_FIELDS, weights, text, null); } catch (IOException e) { - return rethrowUnlessLenient(new IllegalArgumentException(e.getMessage())); + return rethrowUnlessLenient(new IllegalStateException(e.getMessage())); } } @@ -135,7 +136,7 @@ public Query newFuzzyQuery(String text, int fuzziness) { settings.fuzzyMaxExpansions, settings.fuzzyTranspositions); disjuncts.add(wrapWithBoost(query, entry.getValue())); } catch (RuntimeException e) { - rethrowUnlessLenient(e); + disjuncts.add(rethrowUnlessLenient(e)); } } if (disjuncts.size() == 1) { @@ -156,7 +157,7 @@ public Query newPhraseQuery(String text, int slop) { } return queryBuilder.parse(MultiMatchQueryBuilder.Type.PHRASE, phraseWeights, text, null); } catch (IOException e) { - return rethrowUnlessLenient(new IllegalArgumentException(e.getMessage())); + return rethrowUnlessLenient(new IllegalStateException(e.getMessage())); } finally { queryBuilder.setPhraseSlop(0); } @@ -184,7 +185,7 @@ public Query newPrefixQuery(String text) { disjuncts.add(wrapWithBoost(query, entry.getValue())); } } catch (RuntimeException e) { - return rethrowUnlessLenient(e); + disjuncts.add(rethrowUnlessLenient(e)); } } if (disjuncts.size() == 1) { diff --git a/server/src/main/java/org/elasticsearch/index/seqno/CountedBitSet.java b/server/src/main/java/org/elasticsearch/index/seqno/CountedBitSet.java index 54270de1b01c8..d1f6f4a3a3745 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/CountedBitSet.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/CountedBitSet.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.seqno; -import org.apache.lucene.util.BitSet; import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.RamUsageEstimator; @@ -28,7 +27,7 @@ * when all bits are set to reduce memory usage. This structure can work well for sequence numbers as * these numbers are likely to form contiguous ranges (eg. filling all bits). */ -public final class CountedBitSet extends BitSet { +public final class CountedBitSet { static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(CountedBitSet.class); private short onBits; // Number of bits are set. private FixedBitSet bitset; @@ -41,14 +40,12 @@ public CountedBitSet(short numBits) { this.bitset = new FixedBitSet(numBits); } - @Override public boolean get(int index) { assert 0 <= index && index < this.length(); assert bitset == null || onBits < bitset.length() : "Bitset should be released when all bits are set"; return bitset == null ? true : bitset.get(index); } - @Override public void set(int index) { assert 0 <= index && index < this.length(); assert bitset == null || onBits < bitset.length() : "Bitset should be released when all bits are set"; @@ -67,41 +64,16 @@ public void set(int index) { } } - @Override - public void clear(int startIndex, int endIndex) { - throw new UnsupportedOperationException(); - } - - @Override - public void clear(int index) { - throw new UnsupportedOperationException(); - } + // Below methods are pkg-private for testing - @Override - public int cardinality() { + int cardinality() { return onBits; } - @Override - public int length() { + int length() { return bitset == null ? 
onBits : bitset.length(); } - @Override - public int prevSetBit(int index) { - throw new UnsupportedOperationException(); - } - - @Override - public int nextSetBit(int index) { - throw new UnsupportedOperationException(); - } - - @Override - public long ramBytesUsed() { - return BASE_RAM_BYTES_USED + (bitset == null ? 0 : bitset.ramBytesUsed()); - } - boolean isInternalBitsetReleased() { return bitset == null; } diff --git a/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java index 95e3505e7467e..0ec03cb7a8f5e 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java @@ -130,10 +130,9 @@ protected ReplicaResult shardOperationOnReplica(final Request request, final Ind } private void maybeSyncTranslog(final IndexShard indexShard) throws IOException { - final Translog translog = indexShard.getTranslog(); if (indexShard.getTranslogDurability() == Translog.Durability.REQUEST && - translog.getLastSyncedGlobalCheckpoint() < indexShard.getGlobalCheckpoint()) { - indexShard.getTranslog().sync(); + indexShard.getTranslog().getLastSyncedGlobalCheckpoint() < indexShard.getGlobalCheckpoint()) { + indexShard.sync(); } } diff --git a/server/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java index 34926a36f4573..cd33c1bf046ed 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.seqno; import com.carrotsearch.hppc.LongObjectHashMap; -import org.apache.lucene.util.BitSet; import org.elasticsearch.common.SuppressForbidden; /** @@ -39,7 +38,7 @@ public class LocalCheckpointTracker { * A collection of bit sets representing pending sequence numbers. Each sequence number is mapped to a bit set by dividing by the * bit set size. */ - final LongObjectHashMap processedSeqNo = new LongObjectHashMap<>(); + final LongObjectHashMap processedSeqNo = new LongObjectHashMap<>(); /** * The current local checkpoint, i.e., all sequence numbers no more than this number have been completed. 
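For orientation, the bookkeeping these hunks rely on reduces to integer division and modulo: each sequence number lands in a fixed-size window, one CountedBitSet per window, and a window whose bits are all set can drop its backing FixedBitSet entirely. The sketch below is a toy restatement of that mapping, not the patch's code; the window size of 1024 is an assumption, the real constant being defined in LocalCheckpointTracker.

    final class SeqNoWindowSketch {
        static final int BIT_SET_SIZE = 1024; // assumed window size; the real value lives in LocalCheckpointTracker

        // Which CountedBitSet a sequence number belongs to.
        static long bitSetKey(long seqNo) {
            return seqNo / BIT_SET_SIZE;
        }

        // Which bit inside that CountedBitSet represents the sequence number.
        static int bitSetOffset(long seqNo) {
            return (int) (seqNo % BIT_SET_SIZE);
        }
    }

Because sequence numbers tend to complete in contiguous runs, most windows fill up quickly; once a window is full, CountedBitSet answers get() from its counter alone, which is why it no longer needs the full BitSet contract removed above.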
@@ -96,7 +95,7 @@ public synchronized void markSeqNoAsCompleted(final long seqNo) { // this is possible during recovery where we might replay an operation that was also replicated return; } - final BitSet bitSet = getBitSetForSeqNo(seqNo); + final CountedBitSet bitSet = getBitSetForSeqNo(seqNo); final int offset = seqNoToBitSetOffset(seqNo); bitSet.set(offset); if (seqNo == checkpoint + 1) { @@ -170,7 +169,7 @@ assert getBitSetForSeqNo(checkpoint + 1).get(seqNoToBitSetOffset(checkpoint + 1) try { // keep it simple for now, get the checkpoint one by one; in the future we can optimize and read words long bitSetKey = getBitSetKey(checkpoint); - BitSet current = processedSeqNo.get(bitSetKey); + CountedBitSet current = processedSeqNo.get(bitSetKey); if (current == null) { // the bit set corresponding to the checkpoint has already been removed, set ourselves up for the next bit set assert checkpoint % BIT_SET_SIZE == BIT_SET_SIZE - 1; @@ -184,7 +183,7 @@ assert getBitSetForSeqNo(checkpoint + 1).get(seqNoToBitSetOffset(checkpoint + 1) */ if (checkpoint == lastSeqNoInBitSet(bitSetKey)) { assert current != null; - final BitSet removed = processedSeqNo.remove(bitSetKey); + final CountedBitSet removed = processedSeqNo.remove(bitSetKey); assert removed == current; current = processedSeqNo.get(++bitSetKey); } @@ -210,11 +209,11 @@ private long getBitSetKey(final long seqNo) { return seqNo / BIT_SET_SIZE; } - private BitSet getBitSetForSeqNo(final long seqNo) { + private CountedBitSet getBitSetForSeqNo(final long seqNo) { assert Thread.holdsLock(this); final long bitSetKey = getBitSetKey(seqNo); final int index = processedSeqNo.indexOf(bitSetKey); - final BitSet bitSet; + final CountedBitSet bitSet; if (processedSeqNo.indexExists(index)) { bitSet = processedSeqNo.indexGet(index); } else { diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 3832cd0ae2055..3ace9ededc5b3 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -23,6 +23,7 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.CheckIndex; +import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.LeafReaderContext; @@ -1290,12 +1291,16 @@ public void createIndexAndTranslog() throws IOException { /** opens the engine on top of the existing lucene engine but creates an empty translog **/ public void openIndexAndCreateTranslog(boolean forceNewHistoryUUID, long globalCheckpoint) throws IOException { - assert recoveryState.getRecoverySource().getType() != RecoverySource.Type.EMPTY_STORE && - recoveryState.getRecoverySource().getType() != RecoverySource.Type.EXISTING_STORE; - SequenceNumbers.CommitInfo commitInfo = store.loadSeqNoInfo(null); - assert commitInfo.localCheckpoint >= globalCheckpoint : - "trying to create a shard whose local checkpoint [" + commitInfo.localCheckpoint + "] is < global checkpoint [" + if (Assertions.ENABLED) { + assert recoveryState.getRecoverySource().getType() != RecoverySource.Type.EMPTY_STORE && + recoveryState.getRecoverySource().getType() != RecoverySource.Type.EXISTING_STORE; + SequenceNumbers.CommitInfo commitInfo = store.loadSeqNoInfo(null); + assert commitInfo.localCheckpoint >= globalCheckpoint : + "trying to create a 
shard whose local checkpoint [" + commitInfo.localCheckpoint + "] is < global checkpoint [" + globalCheckpoint + "]"; + final List existingCommits = DirectoryReader.listCommits(store.directory()); + assert existingCommits.size() == 1 : "Open index create translog should have one commit, commits[" + existingCommits + "]"; + } globalCheckpointTracker.updateGlobalCheckpointOnReplica(globalCheckpoint, "opening index with a new translog"); innerOpenEngineAndTranslog(EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG, forceNewHistoryUUID); } @@ -2311,8 +2316,7 @@ public int getActiveOperationsCount() { @Override protected void write(List>> candidates) throws IOException { try { - final Engine engine = getEngine(); - engine.getTranslog().ensureSynced(candidates.stream().map(Tuple::v1)); + getEngine().ensureTranslogSynced(candidates.stream().map(Tuple::v1)); } catch (AlreadyClosedException ex) { // that's fine since we already synced everything on engine close - this also is conform with the methods // documentation @@ -2337,9 +2341,9 @@ public final void sync(Translog.Location location, Consumer syncListe translogSyncProcessor.put(location, syncListener); } - public final void sync() throws IOException { + public void sync() throws IOException { verifyNotClosed(); - getEngine().getTranslog().sync(); + getEngine().syncTranslog(); } /** diff --git a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index 81ffbea642c58..c3b4525924ae3 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -112,7 +112,7 @@ boolean recoverFromLocalShards(BiConsumer mappingUpdate for (ObjectObjectCursor mapping : sourceMetaData.getMappings()) { mappingUpdateConsumer.accept(mapping.key, mapping.value); } - indexShard.mapperService().merge(sourceMetaData, MapperService.MergeReason.MAPPING_RECOVERY, true); + indexShard.mapperService().merge(sourceMetaData, MapperService.MergeReason.MAPPING_RECOVERY); // now that the mapping is merged we can validate the index sort configuration. Sort indexSort = indexShard.getIndexSort(); final boolean hasNested = indexShard.mapperService().hasNested(); diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index 74be98b813238..7aab2c750d139 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -182,17 +182,9 @@ public Directory directory() { * @throws IOException if the index is corrupted or the segments file is not present */ public SegmentInfos readLastCommittedSegmentsInfo() throws IOException { - return readCommittedSegmentsInfo(null); - } - - /** - * Returns the committed segments info for the given commit point. - * If the commit point is not provided, this method will return the segments info of the last commit in the store. 
- */ - public SegmentInfos readCommittedSegmentsInfo(final IndexCommit commit) throws IOException { failIfCorrupted(); try { - return readSegmentsInfo(commit, directory()); + return readSegmentsInfo(null, directory()); } catch (CorruptIndexException ex) { markStoreCorrupted(ex); throw ex; diff --git a/server/src/main/java/org/elasticsearch/index/translog/MultiSnapshot.java b/server/src/main/java/org/elasticsearch/index/translog/MultiSnapshot.java index 910d71a51a0a7..7ea241958f87c 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/MultiSnapshot.java +++ b/server/src/main/java/org/elasticsearch/index/translog/MultiSnapshot.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.translog; import com.carrotsearch.hppc.LongObjectHashMap; -import org.apache.lucene.util.BitSet; import org.elasticsearch.index.seqno.CountedBitSet; import org.elasticsearch.index.seqno.SequenceNumbers; @@ -85,7 +84,7 @@ public void close() throws IOException { static final class SeqNoSet { static final short BIT_SET_SIZE = 1024; - private final LongObjectHashMap bitSets = new LongObjectHashMap<>(); + private final LongObjectHashMap bitSets = new LongObjectHashMap<>(); /** * Marks this sequence number and returns true if it is seen before. @@ -93,7 +92,7 @@ static final class SeqNoSet { boolean getAndSet(long value) { assert value >= 0; final long key = value / BIT_SET_SIZE; - BitSet bitset = bitSets.get(key); + CountedBitSet bitset = bitSets.get(key); if (bitset == null) { bitset = new CountedBitSet(BIT_SET_SIZE); bitSets.put(key, bitset); diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 7e0bff5384183..f7c4a39eee380 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -495,7 +495,7 @@ public synchronized void verifyIndexMetadata(IndexMetaData metaData, IndexMetaDa final IndexService service = createIndexService("metadata verification", metaData, indicesQueryCache, indicesFieldDataCache, emptyList()); closeables.add(() -> service.close("metadata verification", false)); - service.mapperService().merge(metaData, MapperService.MergeReason.MAPPING_RECOVERY, true); + service.mapperService().merge(metaData, MapperService.MergeReason.MAPPING_RECOVERY); if (metaData.equals(metaDataUpdate) == false) { service.updateMetaData(metaDataUpdate); } diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java index c52cce0780863..87e15b910f6ad 100644 --- a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java +++ b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java @@ -47,16 +47,6 @@ public class JvmInfo implements Writeable, ToXContentFragment { RuntimeMXBean runtimeMXBean = ManagementFactory.getRuntimeMXBean(); MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean(); - // returns the @ - long pid; - String xPid = runtimeMXBean.getName(); - try { - xPid = xPid.split("@")[0]; - pid = Long.parseLong(xPid); - } catch (Exception e) { - pid = -1; - } - long heapInit = memoryMXBean.getHeapMemoryUsage().getInit() < 0 ? 0 : memoryMXBean.getHeapMemoryUsage().getInit(); long heapMax = memoryMXBean.getHeapMemoryUsage().getMax() < 0 ? 0 : memoryMXBean.getHeapMemoryUsage().getMax(); long nonHeapInit = memoryMXBean.getNonHeapMemoryUsage().getInit() < 0 ? 
0 : memoryMXBean.getNonHeapMemoryUsage().getInit(); @@ -160,7 +150,7 @@ public class JvmInfo implements Writeable, ToXContentFragment { } - INSTANCE = new JvmInfo(pid, System.getProperty("java.version"), runtimeMXBean.getVmName(), runtimeMXBean.getVmVersion(), + INSTANCE = new JvmInfo(JvmPid.getPid(), System.getProperty("java.version"), runtimeMXBean.getVmName(), runtimeMXBean.getVmVersion(), runtimeMXBean.getVmVendor(), runtimeMXBean.getStartTime(), configuredInitialHeapSize, configuredMaxHeapSize, mem, inputArguments, bootClassPath, classPath, systemProperties, gcCollectors, memoryPools, onError, onOutOfMemoryError, useCompressedOops, useG1GC, useSerialGC); diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmPid.java b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmPid.java new file mode 100644 index 0000000000000..2b1b2a1df478a --- /dev/null +++ b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmPid.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.monitor.jvm; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.common.logging.Loggers; + +import java.lang.management.ManagementFactory; + +class JvmPid { + + private static final long PID; + + static long getPid() { + return PID; + } + + static { + PID = initializePid(); + } + + private static long initializePid() { + final String name = ManagementFactory.getRuntimeMXBean().getName(); + try { + return Long.parseLong(name.split("@")[0]); + } catch (final NumberFormatException e) { + Loggers.getLogger(JvmPid.class).debug(new ParameterizedMessage("failed parsing PID from [{}]", name), e); + return -1; + } + } + +} diff --git a/server/src/main/java/org/elasticsearch/plugins/ActionPlugin.java b/server/src/main/java/org/elasticsearch/plugins/ActionPlugin.java index 41f0ed86116ad..7454d74349ea6 100644 --- a/server/src/main/java/org/elasticsearch/plugins/ActionPlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/ActionPlugin.java @@ -42,6 +42,7 @@ import java.util.Objects; import java.util.function.Supplier; import java.util.function.UnaryOperator; +import java.util.stream.Collectors; /** * An additional extension point for {@link Plugin}s that extends Elasticsearch's scripting functionality. Implement it like this: @@ -62,6 +63,15 @@ public interface ActionPlugin { default List> getActions() { return Collections.emptyList(); } + + /** + * Client actions added by this plugin. This defaults to all of the {@linkplain GenericAction} in + * {@linkplain ActionPlugin#getActions()}. + */ + default List getClientActions() { + return getActions().stream().map(a -> a.action).collect(Collectors.toList()); + } + /** * Action filters added by this plugin. 
*/ diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexAction.java index 6a741fd3951d3..0934d8557158b 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexAction.java @@ -49,7 +49,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC if (request.hasContent()) { createIndexRequest.source(request.content(), request.getXContentType()); } - createIndexRequest.updateAllTypes(request.paramAsBoolean("update_all_types", false)); createIndexRequest.timeout(request.paramAsTime("timeout", createIndexRequest.timeout())); createIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", createIndexRequest.masterNodeTimeout())); createIndexRequest.waitForActiveShards(ActiveShardCount.parseString(request.param("wait_for_active_shards"))); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java index 51ff743d2d128..8cf4707262ed6 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.rest.action.admin.indices; -import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutMappingAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutMappingAction.java index 8d7e4a9e6c836..cdac83037db30 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutMappingAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutMappingAction.java @@ -70,7 +70,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC PutMappingRequest putMappingRequest = putMappingRequest(Strings.splitStringByCommaToArray(request.param("index"))); putMappingRequest.type(request.param("type")); putMappingRequest.source(request.requiredContent(), request.getXContentType()); - putMappingRequest.updateAllTypes(request.paramAsBoolean("update_all_types", false)); putMappingRequest.timeout(request.paramAsTime("timeout", putMappingRequest.timeout())); putMappingRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putMappingRequest.masterNodeTimeout())); putMappingRequest.indicesOptions(IndicesOptions.fromRequest(request, putMappingRequest.indicesOptions())); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java index 5b36063e17ac0..58a15bbb36684 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java @@ -147,17 +147,15 @@ protected AggregatorFactory 
doBuild(SearchContext context, AggregatorFactory< Sort sort = indexSortConfig.buildIndexSort(shardContext::fieldMapper, shardContext::getForField); System.arraycopy(sort.getSort(), 0, sortFields, 0, sortFields.length); } - List sourceNames = new ArrayList<>(); for (int i = 0; i < configs.length; i++) { configs[i] = sources.get(i).build(context, i, configs.length, sortFields[i]); - sourceNames.add(sources.get(i).name()); if (configs[i].valuesSource().needsScores()) { throw new IllegalArgumentException("[sources] cannot access _score"); } } final CompositeKey afterKey; if (after != null) { - if (after.size() != sources.size()) { + if (after.size() != configs.length) { throw new IllegalArgumentException("[after] has " + after.size() + " value(s) but [sources] has " + sources.size()); } @@ -179,7 +177,7 @@ protected AggregatorFactory doBuild(SearchContext context, AggregatorFactory< } else { afterKey = null; } - return new CompositeAggregationFactory(name, context, parent, subfactoriesBuilder, metaData, size, configs, sourceNames, afterKey); + return new CompositeAggregationFactory(name, context, parent, subfactoriesBuilder, metaData, size, configs, afterKey); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java index c0aeb5304a580..2b2fa4fb7e3eb 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java @@ -32,17 +32,14 @@ class CompositeAggregationFactory extends AggregatorFactory { private final int size; private final CompositeValuesSourceConfig[] sources; - private final List sourceNames; private final CompositeKey afterKey; CompositeAggregationFactory(String name, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData, - int size, CompositeValuesSourceConfig[] sources, - List sourceNames, CompositeKey afterKey) throws IOException { + int size, CompositeValuesSourceConfig[] sources, CompositeKey afterKey) throws IOException { super(name, context, parent, subFactoriesBuilder, metaData); this.size = size; this.sources = sources; - this.sourceNames = sourceNames; this.afterKey = afterKey; } @@ -50,6 +47,6 @@ class CompositeAggregationFactory extends AggregatorFactory pipelineAggregators, Map metaData) throws IOException { return new CompositeAggregator(name, factories, context, parent, pipelineAggregators, metaData, - size, sources, sourceNames, afterKey); + size, sources, afterKey); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java index 3467aaf318baf..e822480f9150d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java @@ -27,6 +27,7 @@ import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.util.RoaringDocIdSet; +import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import 
org.elasticsearch.search.aggregations.InternalAggregation; @@ -43,11 +44,13 @@ import java.util.List; import java.util.Map; import java.util.TreeMap; +import java.util.stream.Collectors; final class CompositeAggregator extends BucketsAggregator { private final int size; private final CompositeValuesSourceConfig[] sources; private final List sourceNames; + private final List formats; private final boolean canEarlyTerminate; private final TreeMap keys; @@ -59,12 +62,12 @@ final class CompositeAggregator extends BucketsAggregator { CompositeAggregator(String name, AggregatorFactories factories, SearchContext context, Aggregator parent, List pipelineAggregators, Map metaData, - int size, CompositeValuesSourceConfig[] sources, List sourceNames, - CompositeKey rawAfterKey) throws IOException { + int size, CompositeValuesSourceConfig[] sources, CompositeKey rawAfterKey) throws IOException { super(name, factories, context, parent, pipelineAggregators, metaData); this.size = size; this.sources = sources; - this.sourceNames = sourceNames; + this.sourceNames = Arrays.stream(sources).map(CompositeValuesSourceConfig::name).collect(Collectors.toList()); + this.formats = Arrays.stream(sources).map(CompositeValuesSourceConfig::format).collect(Collectors.toList()); // we use slot 0 to fill the current document (size+1). this.array = new CompositeValuesComparator(context.searcher().getIndexReader(), sources, size+1); if (rawAfterKey != null) { @@ -131,15 +134,17 @@ public InternalAggregation buildAggregation(long zeroBucket) throws IOException CompositeKey key = array.toCompositeKey(slot); InternalAggregations aggs = bucketAggregations(slot); int docCount = bucketDocCount(slot); - buckets[pos++] = new InternalComposite.InternalBucket(sourceNames, key, reverseMuls, docCount, aggs); + buckets[pos++] = new InternalComposite.InternalBucket(sourceNames, formats, key, reverseMuls, docCount, aggs); } - return new InternalComposite(name, size, sourceNames, Arrays.asList(buckets), reverseMuls, pipelineAggregators(), metaData()); + return new InternalComposite(name, size, sourceNames, formats, Arrays.asList(buckets), reverseMuls, + pipelineAggregators(), metaData()); } @Override public InternalAggregation buildEmptyAggregation() { final int[] reverseMuls = getReverseMuls(); - return new InternalComposite(name, size, sourceNames, Collections.emptyList(), reverseMuls, pipelineAggregators(), metaData()); + return new InternalComposite(name, size, sourceNames, formats, Collections.emptyList(), reverseMuls, + pipelineAggregators(), metaData()); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesComparator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesComparator.java index 849fe2c513e9b..0ce87460a5429 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesComparator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesComparator.java @@ -56,7 +56,7 @@ final class CompositeValuesComparator { if (vs.isFloatingPoint()) { arrays[i] = CompositeValuesSource.wrapDouble(vs, size, reverseMul); } else { - arrays[i] = CompositeValuesSource.wrapLong(vs, size, reverseMul); + arrays[i] = CompositeValuesSource.wrapLong(vs, sources[i].format(), size, reverseMul); } } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSource.java 
b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSource.java index 88d54744777e0..2d0368dfd4d28 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSource.java @@ -23,8 +23,10 @@ import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.search.LeafCollector; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.joda.FormatDateTimeFormatter; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; +import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.sort.SortOrder; @@ -96,8 +98,9 @@ interface Collector { /** * Creates a {@link CompositeValuesSource} that generates long values. */ - static CompositeValuesSource wrapLong(ValuesSource.Numeric vs, int size, int reverseMul) { - return new LongValuesSource(vs, size, reverseMul); + static CompositeValuesSource wrapLong(ValuesSource.Numeric vs, DocValueFormat format, + int size, int reverseMul) { + return new LongValuesSource(vs, format, size, reverseMul); } /** @@ -273,9 +276,12 @@ Collector getLeafCollector(LeafReaderContext context, Collector next) throws IOE */ private static class LongValuesSource extends CompositeValuesSource { private final long[] values; + // handles "format" for date histogram source + private final DocValueFormat format; - LongValuesSource(ValuesSource.Numeric vs, int size, int reverseMul) { + LongValuesSource(ValuesSource.Numeric vs, DocValueFormat format, int size, int reverseMul) { super(vs, size, reverseMul); + this.format = format; this.values = new long[size]; } @@ -304,7 +310,11 @@ void setTop(Comparable value) { if (value instanceof Number) { topValue = ((Number) value).longValue(); } else { - topValue = Long.parseLong(value.toString()); + // for date histogram source with "format", the after value is formatted + // as a string so we need to retrieve the original value in milliseconds. 
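+                    // (Editor's illustration, not in the original patch: a date_histogram
+                    // source with format "yyyy-MM-dd" renders the after key as e.g.
+                    // "2017-12-01", and parseLong below turns that string back into the
+                    // epoch-millisecond long the comparator works with.)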
+ topValue = format.parseLong(value.toString(), false, () -> { + throw new IllegalArgumentException("now() is not supported in [after] key"); + }); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java index 2652d90f8c3e7..85d172907e013 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java @@ -25,6 +25,7 @@ import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.search.SortField; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -51,6 +52,7 @@ public abstract class CompositeValuesSourceBuilder config = ValuesSourceConfig.resolve(context.getQueryShardContext(), - valueType, field, script, missing, null, null); + valueType, field, script, missing, null, format); return innerBuild(context, config, pos, numPos, sortField); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceConfig.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceConfig.java index 4d5c1c8c84683..ee70d3f39a550 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceConfig.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceConfig.java @@ -19,30 +19,47 @@ package org.elasticsearch.search.aggregations.bucket.composite; +import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.sort.SortOrder; class CompositeValuesSourceConfig { private final String name; private final ValuesSource vs; + private final DocValueFormat format; private final int reverseMul; private final boolean canEarlyTerminate; - CompositeValuesSourceConfig(String name, ValuesSource vs, SortOrder order, boolean canEarlyTerminate) { + CompositeValuesSourceConfig(String name, ValuesSource vs, DocValueFormat format, SortOrder order, boolean canEarlyTerminate) { this.name = name; this.vs = vs; + this.format = format; this.canEarlyTerminate = canEarlyTerminate; this.reverseMul = order == SortOrder.ASC ? 1 : -1; } + /** + * Returns the name associated with this configuration. + */ String name() { return name; } + /** + * Returns the {@link ValuesSource} for this configuration. + */ ValuesSource valuesSource() { return vs; } + /** + * The {@link DocValueFormat} to use for formatting the keys. + * {@link DocValueFormat#RAW} means no formatting. + */ + DocValueFormat format() { + return format; + } + /** * The sort order for the values source (e.g. -1 for descending and 1 for ascending). */ @@ -51,6 +68,9 @@ int reverseMul() { return reverseMul; } + /** + * Returns whether this {@link ValuesSource} is used to sort the index. 
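+     * When it is, the composite aggregation can stop collecting documents early,
+     * once the requested number of buckets has been filled (editor's note: sentence
+     * added for clarity, inferred from how canEarlyTerminate is used in
+     * CompositeAggregator above).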
+ */ boolean canEarlyTerminate() { return canEarlyTerminate; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java index 0094da5069fd7..b7abf82a58ea3 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java @@ -30,6 +30,8 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.script.Script; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.support.FieldContext; @@ -46,8 +48,8 @@ import static org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder.DATE_FIELD_UNITS; /** - * A {@link CompositeValuesSourceBuilder} that that builds a {@link RoundingValuesSource} from a {@link Script} or - * a field name. + * A {@link CompositeValuesSourceBuilder} that builds a {@link RoundingValuesSource} from a {@link Script} or + * a field name using the provided interval. */ public class DateHistogramValuesSourceBuilder extends CompositeValuesSourceBuilder { static final String TYPE = "date_histogram"; @@ -55,6 +57,7 @@ public class DateHistogramValuesSourceBuilder extends CompositeValuesSourceBuild private static final ObjectParser PARSER; static { PARSER = new ObjectParser<>(DateHistogramValuesSourceBuilder.TYPE); + PARSER.declareString(DateHistogramValuesSourceBuilder::format, new ParseField("format")); PARSER.declareField((histogram, interval) -> { if (interval instanceof Long) { histogram.interval((long) interval); @@ -235,7 +238,11 @@ protected CompositeValuesSourceConfig innerBuild(SearchContext context, canEarlyTerminate = checkCanEarlyTerminate(context.searcher().getIndexReader(), fieldContext.field(), order() == SortOrder.ASC ? false : true, sortField); } - return new CompositeValuesSourceConfig(name, vs, order(), canEarlyTerminate); + // dates are returned as timestamp in milliseconds-since-the-epoch unless a specific date format + // is specified in the builder. + final DocValueFormat docValueFormat = format() == null ? 
DocValueFormat.RAW : config.format(); + return new CompositeValuesSourceConfig(name, vs, docValueFormat, + order(), canEarlyTerminate); } else { throw new IllegalArgumentException("invalid source, expected numeric, got " + orig.getClass().getSimpleName()); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java index dd5eb1b52d04c..83ada5dbbc3c3 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java @@ -37,7 +37,7 @@ import java.util.Objects; /** - * A {@link CompositeValuesSourceBuilder} that that builds a {@link HistogramValuesSource} from another numeric values source + * A {@link CompositeValuesSourceBuilder} that builds a {@link HistogramValuesSource} from another numeric values source * using the provided interval. */ public class HistogramValuesSourceBuilder extends CompositeValuesSourceBuilder { @@ -128,7 +128,7 @@ protected CompositeValuesSourceConfig innerBuild(SearchContext context, canEarlyTerminate = checkCanEarlyTerminate(context.searcher().getIndexReader(), fieldContext.field(), order() == SortOrder.ASC ? false : true, sortField); } - return new CompositeValuesSourceConfig(name, vs, order(), canEarlyTerminate); + return new CompositeValuesSourceConfig(name, vs, config.format(), order(), canEarlyTerminate); } else { throw new IllegalArgumentException("invalid source, expected numeric, got " + orig.getClass().getSimpleName()); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java index 824250948d740..fd9245a9c4a5b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java @@ -20,9 +20,11 @@ package org.elasticsearch.search.aggregations.bucket.composite; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; @@ -35,6 +37,7 @@ import java.util.AbstractSet; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -49,11 +52,14 @@ public class InternalComposite private final List buckets; private final int[] reverseMuls; private final List sourceNames; + private final List formats; - InternalComposite(String name, int size, List sourceNames, List buckets, int[] reverseMuls, + InternalComposite(String name, int size, List sourceNames, List formats, + List buckets, int[] reverseMuls, List pipelineAggregators, Map metaData) { super(name, pipelineAggregators, metaData); this.sourceNames = sourceNames; + this.formats = formats; this.buckets = buckets; this.size = size; this.reverseMuls = 
reverseMuls; @@ -63,14 +69,27 @@ public InternalComposite(StreamInput in) throws IOException { super(in); this.size = in.readVInt(); this.sourceNames = in.readList(StreamInput::readString); + this.formats = new ArrayList<>(sourceNames.size()); + for (int i = 0; i < sourceNames.size(); i++) { + if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + formats.add(in.readNamedWriteable(DocValueFormat.class)); + } else { + formats.add(DocValueFormat.RAW); + } + } this.reverseMuls = in.readIntArray(); - this.buckets = in.readList((input) -> new InternalBucket(input, sourceNames, reverseMuls)); + this.buckets = in.readList((input) -> new InternalBucket(input, sourceNames, formats, reverseMuls)); } @Override protected void doWriteTo(StreamOutput out) throws IOException { out.writeVInt(size); out.writeStringList(sourceNames); + if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + for (DocValueFormat format : formats) { + out.writeNamedWriteable(format); + } + } out.writeIntArray(reverseMuls); out.writeList(buckets); } @@ -87,12 +106,13 @@ public String getWriteableName() { @Override public InternalComposite create(List buckets) { - return new InternalComposite(name, size, sourceNames, buckets, reverseMuls, pipelineAggregators(), getMetaData()); + return new InternalComposite(name, size, sourceNames, formats, buckets, reverseMuls, pipelineAggregators(), getMetaData()); } @Override public InternalBucket createBucket(InternalAggregations aggregations, InternalBucket prototype) { - return new InternalBucket(prototype.sourceNames, prototype.key, prototype.reverseMuls, prototype.docCount, aggregations); + return new InternalBucket(prototype.sourceNames, prototype.formats, prototype.key, prototype.reverseMuls, + prototype.docCount, aggregations); } public int getSize() { @@ -149,7 +169,7 @@ public InternalAggregation doReduce(List aggregations, Redu reduceContext.consumeBucketsAndMaybeBreak(1); result.add(reduceBucket); } - return new InternalComposite(name, size, sourceNames, result, reverseMuls, pipelineAggregators(), metaData); + return new InternalComposite(name, size, sourceNames, formats, result, reverseMuls, pipelineAggregators(), metaData); } @Override @@ -191,18 +211,21 @@ static class InternalBucket extends InternalMultiBucketAggregation.InternalBucke private final InternalAggregations aggregations; private final transient int[] reverseMuls; private final transient List sourceNames; + private final transient List formats; - InternalBucket(List sourceNames, CompositeKey key, int[] reverseMuls, long docCount, InternalAggregations aggregations) { + InternalBucket(List sourceNames, List formats, CompositeKey key, int[] reverseMuls, long docCount, + InternalAggregations aggregations) { this.key = key; this.docCount = docCount; this.aggregations = aggregations; this.reverseMuls = reverseMuls; this.sourceNames = sourceNames; + this.formats = formats; } @SuppressWarnings("unchecked") - InternalBucket(StreamInput in, List sourceNames, int[] reverseMuls) throws IOException { + InternalBucket(StreamInput in, List sourceNames, List formats, int[] reverseMuls) throws IOException { final Comparable[] values = new Comparable[in.readVInt()]; for (int i = 0; i < values.length; i++) { values[i] = (Comparable) in.readGenericValue(); @@ -212,6 +235,7 @@ static class InternalBucket extends InternalMultiBucketAggregation.InternalBucke this.aggregations = InternalAggregations.readAggregations(in); this.reverseMuls = reverseMuls; this.sourceNames = sourceNames; + this.formats = formats; } @Override @@ -242,9 
+266,11 @@ public boolean equals(Object obj) { @Override public Map getKey() { - return new ArrayMap(sourceNames, key.values()); + // returns the formatted key in a map + return new ArrayMap(sourceNames, formats, key.values()); } + // get the raw key (without formatting to preserve the natural order). // visible for testing CompositeKey getRawKey() { return key; @@ -260,7 +286,7 @@ public String getKeyAsString() { } builder.append(sourceNames.get(i)); builder.append('='); - builder.append(formatObject(key.get(i))); + builder.append(formatObject(key.get(i), formats.get(i))); } builder.append('}'); return builder.toString(); @@ -284,7 +310,7 @@ InternalBucket reduce(List buckets, ReduceContext reduceContext) aggregations.add(bucket.aggregations); } InternalAggregations aggs = InternalAggregations.reduce(aggregations, reduceContext); - return new InternalBucket(sourceNames, key, reverseMuls, docCount, aggs); + return new InternalBucket(sourceNames, formats, key, reverseMuls, docCount, aggs); } @Override @@ -303,26 +329,52 @@ public int compareKey(InternalBucket other) { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { /** - * See {@link CompositeAggregation#bucketToXContentFragment} + * See {@link CompositeAggregation#bucketToXContent} */ throw new UnsupportedOperationException("not implemented"); } } - static Object formatObject(Object obj) { - if (obj instanceof BytesRef) { - return ((BytesRef) obj).utf8ToString(); + /** + * Format obj using the provided {@link DocValueFormat}. + * If the format is equal to {@link DocValueFormat#RAW}, the object is returned as is + * for numbers and as a string for {@link BytesRef}s. + */ + static Object formatObject(Object obj, DocValueFormat format) { + if (obj.getClass() == BytesRef.class) { + BytesRef value = (BytesRef) obj; + if (format == DocValueFormat.RAW) { + return value.utf8ToString(); + } else { + return format.format((BytesRef) obj); + } + } else if (obj.getClass() == Long.class) { + Long value = (Long) obj; + if (format == DocValueFormat.RAW) { + return value; + } else { + return format.format(value); + } + } else if (obj.getClass() == Double.class) { + Double value = (Double) obj; + if (format == DocValueFormat.RAW) { + return value; + } else { + return format.format((Double) obj); + } } return obj; } private static class ArrayMap extends AbstractMap { final List keys; + final List formats; final Object[] values; - ArrayMap(List keys, Object[] values) { - assert keys.size() == values.length; + ArrayMap(List keys, List formats, Object[] values) { + assert keys.size() == values.length && keys.size() == formats.size(); this.keys = keys; + this.formats = formats; this.values = values; } @@ -335,7 +387,7 @@ public int size() { public Object get(Object key) { for (int i = 0; i < keys.size(); i++) { if (key.equals(keys.get(i))) { - return formatObject(values[i]); + return formatObject(values[i], formats.get(i)); } } return null; @@ -356,7 +408,7 @@ public boolean hasNext() { @Override public Entry next() { SimpleEntry entry = - new SimpleEntry<>(keys.get(pos), formatObject(values[pos])); + new SimpleEntry<>(keys.get(pos), formatObject(values[pos], formats.get(pos))); ++ pos; return entry; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java index 481c14a37f504..6ca5cdbcb6230 100644 ---
a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java @@ -95,6 +95,6 @@ protected CompositeValuesSourceConfig innerBuild(SearchContext context, canEarlyTerminate = checkCanEarlyTerminate(context.searcher().getIndexReader(), fieldContext.field(), order() == SortOrder.ASC ? false : true, sortField); } - return new CompositeValuesSourceConfig(name, vs, order(), canEarlyTerminate); + return new CompositeValuesSourceConfig(name, vs, config.format(), order(), canEarlyTerminate); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregator.java index 0decfa05575e4..27890efbff182 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregator.java @@ -44,6 +44,7 @@ public class AvgAggregator extends NumericMetricsAggregator.SingleValue { LongArray counts; DoubleArray sums; + DoubleArray compensations; DocValueFormat format; public AvgAggregator(String name, ValuesSource.Numeric valuesSource, DocValueFormat formatter, SearchContext context, @@ -55,6 +56,7 @@ public AvgAggregator(String name, ValuesSource.Numeric valuesSource, DocValueFor final BigArrays bigArrays = context.bigArrays(); counts = bigArrays.newLongArray(1, true); sums = bigArrays.newDoubleArray(1, true); + compensations = bigArrays.newDoubleArray(1, true); } } @@ -76,15 +78,29 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, public void collect(int doc, long bucket) throws IOException { counts = bigArrays.grow(counts, bucket + 1); sums = bigArrays.grow(sums, bucket + 1); + compensations = bigArrays.grow(compensations, bucket + 1); if (values.advanceExact(doc)) { final int valueCount = values.docValueCount(); counts.increment(bucket, valueCount); - double sum = 0; + // Compute the sum of double values with Kahan summation algorithm which is more + // accurate than naive summation. 
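+                // (Editor's aside, not part of the patch: what the compensation term buys.
+                // In double arithmetic 1e16 + 1.0 == 1e16, so summing {1e16, 1.0, 1.0}
+                // naively still yields 1e16. With the Kahan update below, the first lost
+                // 1.0 is carried in `compensation`, and the running sum ends at
+                // 1.0000000000000002E16, the correctly rounded result.)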
+ double sum = sums.get(bucket); + double compensation = compensations.get(bucket); + for (int i = 0; i < valueCount; i++) { - sum += values.nextValue(); + double value = values.nextValue(); + if (Double.isFinite(value) == false) { + sum += value; + } else if (Double.isFinite(sum)) { + double corrected = value - compensation; + double newSum = sum + corrected; + compensation = (newSum - sum) - corrected; + sum = newSum; + } } - sums.increment(bucket, sum); + sums.set(bucket, sum); + compensations.set(bucket, compensation); } } }; @@ -113,7 +129,7 @@ public InternalAggregation buildEmptyAggregation() { @Override public void doClose() { - Releasables.close(counts, sums); + Releasables.close(counts, sums, compensations); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java index 7fdcc6396b8c1..c30574c576de8 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java @@ -91,9 +91,20 @@ public String getWriteableName() { public InternalAvg doReduce(List aggregations, ReduceContext reduceContext) { long count = 0; double sum = 0; + double compensation = 0; + // Compute the sum of double values with Kahan summation algorithm which is more + // accurate than naive summation. for (InternalAggregation aggregation : aggregations) { - count += ((InternalAvg) aggregation).count; - sum += ((InternalAvg) aggregation).sum; + InternalAvg avg = (InternalAvg) aggregation; + count += avg.count; + if (Double.isFinite(avg.sum) == false) { + sum += avg.sum; + } else if (Double.isFinite(sum)) { + double corrected = avg.sum - compensation; + double newSum = sum + corrected; + compensation = (newSum - sum) - corrected; + sum = newSum; + } } return new InternalAvg(getName(), sum, count, format, pipelineAggregators(), getMetaData()); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java index bebe9f892b6c3..04ef595690a33 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.aggregations.metrics.scripted; import org.apache.lucene.index.LeafReaderContext; +import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.Script; import org.elasticsearch.script.SearchScript; @@ -77,6 +78,7 @@ public InternalAggregation buildAggregation(long owningBucketOrdinal) { Object aggregation; if (combineScript != null) { aggregation = combineScript.run(); + CollectionUtils.ensureNoSelfReferences(aggregation); } else { aggregation = params.get("_agg"); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java index 6d7ae0cddc0df..19f74cd72c821 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java @@ -152,12 
+152,23 @@ public InternalStats doReduce(List aggregations, ReduceCont double min = Double.POSITIVE_INFINITY; double max = Double.NEGATIVE_INFINITY; double sum = 0; + double compensation = 0; for (InternalAggregation aggregation : aggregations) { InternalStats stats = (InternalStats) aggregation; count += stats.getCount(); min = Math.min(min, stats.getMin()); max = Math.max(max, stats.getMax()); - sum += stats.getSum(); + // Compute the sum of double values with Kahan summation algorithm which is more + // accurate than naive summation. + double value = stats.getSum(); + if (Double.isFinite(value) == false) { + sum += value; + } else if (Double.isFinite(sum)) { + double corrected = value - compensation; + double newSum = sum + corrected; + compensation = (newSum - sum) - corrected; + sum = newSum; + } } return new InternalStats(name, count, sum, min, max, format, pipelineAggregators(), getMetaData()); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregator.java index cca176bd1ad5f..321e9e10f0fe8 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregator.java @@ -45,6 +45,7 @@ public class StatsAggregator extends NumericMetricsAggregator.MultiValue { LongArray counts; DoubleArray sums; + DoubleArray compensations; DoubleArray mins; DoubleArray maxes; @@ -59,6 +60,7 @@ public StatsAggregator(String name, ValuesSource.Numeric valuesSource, DocValueF final BigArrays bigArrays = context.bigArrays(); counts = bigArrays.newLongArray(1, true); sums = bigArrays.newDoubleArray(1, true); + compensations = bigArrays.newDoubleArray(1, true); mins = bigArrays.newDoubleArray(1, false); mins.fill(0, mins.size(), Double.POSITIVE_INFINITY); maxes = bigArrays.newDoubleArray(1, false); @@ -88,6 +90,7 @@ public void collect(int doc, long bucket) throws IOException { final long overSize = BigArrays.overSize(bucket + 1); counts = bigArrays.resize(counts, overSize); sums = bigArrays.resize(sums, overSize); + compensations = bigArrays.resize(compensations, overSize); mins = bigArrays.resize(mins, overSize); maxes = bigArrays.resize(maxes, overSize); mins.fill(from, overSize, Double.POSITIVE_INFINITY); @@ -97,16 +100,28 @@ public void collect(int doc, long bucket) throws IOException { if (values.advanceExact(doc)) { final int valuesCount = values.docValueCount(); counts.increment(bucket, valuesCount); - double sum = 0; double min = mins.get(bucket); double max = maxes.get(bucket); + // Compute the sum of double values with Kahan summation algorithm which is more + // accurate than naive summation. 
+ double sum = sums.get(bucket); + double compensation = compensations.get(bucket); + for (int i = 0; i < valuesCount; i++) { double value = values.nextValue(); - sum += value; + if (Double.isFinite(value) == false) { + sum += value; + } else if (Double.isFinite(sum)) { + double corrected = value - compensation; + double newSum = sum + corrected; + compensation = (newSum - sum) - corrected; + sum = newSum; + } min = Math.min(min, value); max = Math.max(max, value); } - sums.increment(bucket, sum); + sums.set(bucket, sum); + compensations.set(bucket, compensation); mins.set(bucket, min); maxes.set(bucket, max); } @@ -164,6 +179,6 @@ public InternalAggregation buildEmptyAggregation() { @Override public void doClose() { - Releasables.close(counts, maxes, mins, sums); + Releasables.close(counts, maxes, mins, sums, compensations); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java index 8dd78bf13730b..8339c06aefdcc 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java @@ -49,9 +49,11 @@ public class ExtendedStatsAggregator extends NumericMetricsAggregator.MultiValue LongArray counts; DoubleArray sums; + DoubleArray compensations; DoubleArray mins; DoubleArray maxes; DoubleArray sumOfSqrs; + DoubleArray compensationOfSqrs; public ExtendedStatsAggregator(String name, ValuesSource.Numeric valuesSource, DocValueFormat formatter, SearchContext context, Aggregator parent, double sigma, List pipelineAggregators, @@ -65,11 +67,13 @@ public ExtendedStatsAggregator(String name, ValuesSource.Numeric valuesSource, D final BigArrays bigArrays = context.bigArrays(); counts = bigArrays.newLongArray(1, true); sums = bigArrays.newDoubleArray(1, true); + compensations = bigArrays.newDoubleArray(1, true); mins = bigArrays.newDoubleArray(1, false); mins.fill(0, mins.size(), Double.POSITIVE_INFINITY); maxes = bigArrays.newDoubleArray(1, false); maxes.fill(0, maxes.size(), Double.NEGATIVE_INFINITY); sumOfSqrs = bigArrays.newDoubleArray(1, true); + compensationOfSqrs = bigArrays.newDoubleArray(1, true); } } @@ -95,9 +99,11 @@ public void collect(int doc, long bucket) throws IOException { final long overSize = BigArrays.overSize(bucket + 1); counts = bigArrays.resize(counts, overSize); sums = bigArrays.resize(sums, overSize); + compensations = bigArrays.resize(compensations, overSize); mins = bigArrays.resize(mins, overSize); maxes = bigArrays.resize(maxes, overSize); sumOfSqrs = bigArrays.resize(sumOfSqrs, overSize); + compensationOfSqrs = bigArrays.resize(compensationOfSqrs, overSize); mins.fill(from, overSize, Double.POSITIVE_INFINITY); maxes.fill(from, overSize, Double.NEGATIVE_INFINITY); } @@ -105,19 +111,40 @@ public void collect(int doc, long bucket) throws IOException { if (values.advanceExact(doc)) { final int valuesCount = values.docValueCount(); counts.increment(bucket, valuesCount); - double sum = 0; - double sumOfSqr = 0; double min = mins.get(bucket); double max = maxes.get(bucket); + // Compute the sum and sum of squares for double values with Kahan summation algorithm + // which is more accurate than naive summation.
+ double sum = sums.get(bucket); + double compensation = compensations.get(bucket); + double sumOfSqr = sumOfSqrs.get(bucket); + double compensationOfSqr = compensationOfSqrs.get(bucket); for (int i = 0; i < valuesCount; i++) { double value = values.nextValue(); - sum += value; - sumOfSqr += value * value; + if (Double.isFinite(value) == false) { + sum += value; + sumOfSqr += value * value; + } else { + if (Double.isFinite(sum)) { + double corrected = value - compensation; + double newSum = sum + corrected; + compensation = (newSum - sum) - corrected; + sum = newSum; + } + if (Double.isFinite(sumOfSqr)) { + double correctedOfSqr = value * value - compensationOfSqr; + double newSumOfSqr = sumOfSqr + correctedOfSqr; + compensationOfSqr = (newSumOfSqr - sumOfSqr) - correctedOfSqr; + sumOfSqr = newSumOfSqr; + } + } min = Math.min(min, value); max = Math.max(max, value); } - sums.increment(bucket, sum); - sumOfSqrs.increment(bucket, sumOfSqr); + sums.set(bucket, sum); + compensations.set(bucket, compensation); + sumOfSqrs.set(bucket, sumOfSqr); + compensationOfSqrs.set(bucket, compensationOfSqr); mins.set(bucket, min); maxes.set(bucket, max); } @@ -196,6 +223,6 @@ public InternalAggregation buildEmptyAggregation() { @Override public void doClose() { - Releasables.close(counts, maxes, mins, sumOfSqrs, sums); + Releasables.close(counts, maxes, mins, sumOfSqrs, compensationOfSqrs, sums, compensations); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java index 6e06a88cccd32..1f259fbe87d9f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java @@ -45,7 +45,7 @@ public static Metrics resolve(String name) { private final double sigma; public InternalExtendedStats(String name, long count, double sum, double min, double max, double sumOfSqrs, double sigma, - DocValueFormat formatter, List pipelineAggregators, Map metaData) { + DocValueFormat formatter, List pipelineAggregators, Map metaData) { super(name, count, sum, min, max, formatter, pipelineAggregators, metaData); this.sumOfSqrs = sumOfSqrs; this.sigma = sigma; @@ -142,16 +142,25 @@ public String getStdDeviationBoundAsString(Bounds bound) { @Override public InternalExtendedStats doReduce(List aggregations, ReduceContext reduceContext) { double sumOfSqrs = 0; + double compensationOfSqrs = 0; for (InternalAggregation aggregation : aggregations) { InternalExtendedStats stats = (InternalExtendedStats) aggregation; if (stats.sigma != sigma) { throw new IllegalStateException("Cannot reduce other stats aggregations that have a different sigma"); } - sumOfSqrs += stats.getSumOfSquares(); + double value = stats.getSumOfSquares(); + if (Double.isFinite(value) == false) { + sumOfSqrs += value; + } else if (Double.isFinite(sumOfSqrs)) { + double correctedOfSqrs = value - compensationOfSqrs; + double newSumOfSqrs = sumOfSqrs + correctedOfSqrs; + compensationOfSqrs = (newSumOfSqrs - sumOfSqrs) - correctedOfSqrs; + sumOfSqrs = newSumOfSqrs; + } } final InternalStats stats = super.doReduce(aggregations, reduceContext); return new InternalExtendedStats(name, stats.getCount(), stats.getSum(), stats.getMin(), stats.getMax(), sumOfSqrs, sigma, - format, pipelineAggregators(), getMetaData()); + format, 
pipelineAggregators(), getMetaData()); } static class Fields { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java index 6f723f4fbcb28..fb64d168db6aa 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java @@ -35,7 +35,7 @@ public class InternalSum extends InternalNumericMetricsAggregation.SingleValue i private final double sum; public InternalSum(String name, double sum, DocValueFormat formatter, List pipelineAggregators, - Map metaData) { + Map metaData) { super(name, pipelineAggregators, metaData); this.sum = sum; this.format = formatter; @@ -73,9 +73,20 @@ public double getValue() { @Override public InternalSum doReduce(List aggregations, ReduceContext reduceContext) { + // Compute the sum of double values with Kahan summation algorithm which is more + // accurate than naive summation. double sum = 0; + double compensation = 0; for (InternalAggregation aggregation : aggregations) { - sum += ((InternalSum) aggregation).sum; + double value = ((InternalSum) aggregation).sum; + if (Double.isFinite(value) == false) { + sum += value; + } else if (Double.isFinite(sum)) { + double corrected = value - compensation; + double newSum = sum + corrected; + compensation = (newSum - sum) - corrected; + sum = newSum; + } } return new InternalSum(name, sum, format, pipelineAggregators(), getMetaData()); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java index bd325b39373e5..9ed8103a1e1ee 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java @@ -43,6 +43,7 @@ public class SumAggregator extends NumericMetricsAggregator.SingleValue { private final DocValueFormat format; private DoubleArray sums; + private DoubleArray compensations; SumAggregator(String name, ValuesSource.Numeric valuesSource, DocValueFormat formatter, SearchContext context, Aggregator parent, List pipelineAggregators, Map metaData) throws IOException { @@ -51,6 +52,7 @@ public class SumAggregator extends NumericMetricsAggregator.SingleValue { this.format = formatter; if (valuesSource != null) { sums = context.bigArrays().newDoubleArray(1, true); + compensations = context.bigArrays().newDoubleArray(1, true); } } @@ -71,13 +73,27 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, @Override public void collect(int doc, long bucket) throws IOException { sums = bigArrays.grow(sums, bucket + 1); + compensations = bigArrays.grow(compensations, bucket + 1); + if (values.advanceExact(doc)) { final int valuesCount = values.docValueCount(); - double sum = 0; + // Compute the sum of double values with Kahan summation algorithm which is more + // accurate than naive summation. 
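+ // Non-finite values are added directly: once the running sum is +/-Infinity or NaN
+ // the compensation term is meaningless, so the corrected arithmetic is skipped.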
+ double sum = sums.get(bucket); + double compensation = compensations.get(bucket); for (int i = 0; i < valuesCount; i++) { - sum += values.nextValue(); + double value = values.nextValue(); + if (Double.isFinite(value) == false) { + sum += value; + } else if (Double.isFinite(sum)) { + double corrected = value - compensation; + double newSum = sum + corrected; + compensation = (newSum - sum) - corrected; + sum = newSum; + } } - sums.increment(bucket, sum); + compensations.set(bucket, compensation); + sums.set(bucket, sum); } } }; @@ -106,6 +122,6 @@ public InternalAggregation buildEmptyAggregation() { @Override public void doClose() { - Releasables.close(sums); + Releasables.close(sums, compensations); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java index 0a56ae2c1cbfa..42337fbce0f98 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java @@ -112,10 +112,11 @@ public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext } else { ExecutableScript executableScript = factory.newInstance(vars); Object returned = executableScript.run(); + // no need to check for self references since only numbers are valid if (returned == null) { newBuckets.add(bucket); } else { - if (!(returned instanceof Number)) { + if ((returned instanceof Number) == false) { throw new AggregationExecutionException("series_arithmetic script for reducer [" + name() + "] must return a Number"); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSource.java index b5a109e89cbad..6dc2758fa5c25 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSource.java @@ -30,6 +30,7 @@ import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.lucene.ScorerAware; +import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.index.fielddata.AbstractSortingNumericDocValues; import org.elasticsearch.index.fielddata.AtomicOrdinalsFieldData; import org.elasticsearch.index.fielddata.IndexFieldData; @@ -460,7 +461,9 @@ public boolean advanceExact(int doc) throws IOException { for (int i = 0; i < count; ++i) { final BytesRef value = bytesValues.nextValue(); script.setNextAggregationValue(value.utf8ToString()); - values[i].copyChars(script.run().toString()); + Object run = script.run(); + CollectionUtils.ensureNoSelfReferences(run); + values[i].copyChars(run.toString()); } sort(); return true; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptBytesValues.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptBytesValues.java index 38950325daa13..662d856603e54 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptBytesValues.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptBytesValues.java @@ -20,6 +20,7 @@ import org.apache.lucene.search.Scorer; import 
org.elasticsearch.common.lucene.ScorerAware; +import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.index.fielddata.SortingBinaryDocValues; import org.elasticsearch.script.SearchScript; @@ -44,6 +45,7 @@ private void set(int i, Object o) { if (o == null) { values[i].clear(); } else { + CollectionUtils.ensureNoSelfReferences(o); values[i].copyChars(o.toString()); } } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/ScriptFieldsFetchSubPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/ScriptFieldsFetchSubPhase.java index c45734108f56d..948bcc3e0b3ec 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/ScriptFieldsFetchSubPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/ScriptFieldsFetchSubPhase.java @@ -22,6 +22,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.ReaderUtil; import org.elasticsearch.common.document.DocumentField; +import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.script.SearchScript; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.fetch.FetchSubPhase; @@ -64,6 +65,7 @@ public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOExcept final Object value; try { value = leafScripts[i].run(); + CollectionUtils.ensureNoSelfReferences(value); } catch (RuntimeException e) { if (scriptFields.get(i).ignoreException()) { continue; diff --git a/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java index 331988a183fa9..99668515de5b1 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -341,7 +342,9 @@ public boolean advanceExact(int doc) throws IOException { } @Override public BytesRef binaryValue() { - spare.copyChars(leafScript.run().toString()); + final Object run = leafScript.run(); + CollectionUtils.ensureNoSelfReferences(run); + spare.copyChars(run.toString()); return spare.get(); } }; diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ReadContext.java b/server/src/main/java9/org/elasticsearch/monitor/jvm/JvmPid.java similarity index 71% rename from libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ReadContext.java rename to server/src/main/java9/org/elasticsearch/monitor/jvm/JvmPid.java index d23ce56f57ad1..5ce8959601798 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ReadContext.java +++ b/server/src/main/java9/org/elasticsearch/monitor/jvm/JvmPid.java @@ -17,19 +17,14 @@ * under the License. 
*/ -package org.elasticsearch.nio; +package org.elasticsearch.monitor.jvm; -import java.io.IOException; +import java.lang.ProcessHandle; -public interface ReadContext extends AutoCloseable { +class JvmPid { - int read() throws IOException; - - @Override - void close(); - - @FunctionalInterface - interface ReadConsumer { - int consumeReads(InboundChannelBuffer channelBuffer) throws IOException; + static long getPid() { + return ProcessHandle.current().pid(); } + } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java new file mode 100644 index 0000000000000..e616e0383118d --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java @@ -0,0 +1,60 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.close; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; +import static org.hamcrest.CoreMatchers.equalTo; + +public class CloseIndexResponseTests extends ESTestCase { + + public void testFromToXContent() throws IOException { + final CloseIndexResponse closeIndexResponse = createTestItem(); + + boolean humanReadable = randomBoolean(); + final XContentType xContentType = randomFrom(XContentType.values()); + BytesReference originalBytes = toShuffledXContent(closeIndexResponse, xContentType, ToXContent.EMPTY_PARAMS, humanReadable); + BytesReference mutated; + if (randomBoolean()) { + mutated = insertRandomFields(xContentType, originalBytes, null, random()); + } else { + mutated = originalBytes; + } + + CloseIndexResponse parsedCloseIndexResponse; + try (XContentParser parser = createParser(xContentType.xContent(), mutated)) { + parsedCloseIndexResponse = CloseIndexResponse.fromXContent(parser); + assertNull(parser.nextToken()); + } + assertThat(parsedCloseIndexResponse.isAcknowledged(), equalTo(closeIndexResponse.isAcknowledged())); + } + + private static CloseIndexResponse createTestItem() { + boolean acknowledged = randomBoolean(); + return new CloseIndexResponse(acknowledged); + } +} diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java index 14d6647071453..df63613b5b97d 100644 --- 
a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java @@ -280,7 +280,7 @@ public void testMappingConflictRootCause() throws Exception { .field("type", "text") .endObject().endObject().endObject()); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> b.get()); - assertThat(e.getMessage(), containsString("mapper [text] is used by multiple types")); + assertThat(e.getMessage(), containsString("Mapper for [text] conflicts with existing mapping:")); } public void testRestartIndexCreationAfterFullClusterRestart() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestTests.java index 41691f70c06f3..d7553ebf07cda 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestTests.java @@ -115,7 +115,7 @@ public void testToAndFromXContent() throws IOException { final XContentType xContentType = randomFrom(XContentType.values()); BytesReference originalBytes = toShuffledXContent(createIndexRequest, xContentType, EMPTY_PARAMS, humanReadable); - CreateIndexRequest parsedCreateIndexRequest = new CreateIndexRequest(createIndexRequest.index()); + CreateIndexRequest parsedCreateIndexRequest = new CreateIndexRequest(); parsedCreateIndexRequest.source(originalBytes, xContentType); assertMappingsEqual(createIndexRequest.mappings(), parsedCreateIndexRequest.mappings()); @@ -201,7 +201,7 @@ private static XContentBuilder randomMapping(String type) throws IOException { return builder; } - private static void randomMappingFields(XContentBuilder builder, boolean allowObjectField) throws IOException { + public static void randomMappingFields(XContentBuilder builder, boolean allowObjectField) throws IOException { builder.startObject("properties"); int fieldsNo = randomIntBetween(0, 5); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java index 96dcef700a956..902dc1870934c 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java @@ -21,17 +21,26 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequestTests; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.yaml.YamlXContent; import org.elasticsearch.index.Index; import 
org.elasticsearch.test.ESTestCase; import java.io.IOException; +import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS; + public class PutMappingRequestTests extends ESTestCase { public void testValidation() { @@ -94,4 +103,79 @@ public void testPutMappingRequestSerialization() throws IOException { } } } + + public void testToXContent() throws IOException { + PutMappingRequest request = new PutMappingRequest("foo"); + request.type("my_type"); + + XContentBuilder mapping = JsonXContent.contentBuilder().startObject(); + mapping.startObject("properties"); + mapping.startObject("email"); + mapping.field("type", "text"); + mapping.endObject(); + mapping.endObject(); + mapping.endObject(); + request.source(mapping); + + String actualRequestBody = Strings.toString(request); + String expectedRequestBody = "{\"properties\":{\"email\":{\"type\":\"text\"}}}"; + assertEquals(expectedRequestBody, actualRequestBody); + } + + public void testToXContentWithEmptySource() throws IOException { + PutMappingRequest request = new PutMappingRequest("foo"); + request.type("my_type"); + + String actualRequestBody = Strings.toString(request); + String expectedRequestBody = "{}"; + assertEquals(expectedRequestBody, actualRequestBody); + } + + public void testToAndFromXContent() throws IOException { + + final PutMappingRequest putMappingRequest = createTestItem(); + + boolean humanReadable = randomBoolean(); + final XContentType xContentType = randomFrom(XContentType.values()); + BytesReference originalBytes = toShuffledXContent(putMappingRequest, xContentType, EMPTY_PARAMS, humanReadable); + + PutMappingRequest parsedPutMappingRequest = new PutMappingRequest(); + parsedPutMappingRequest.source(originalBytes, xContentType); + + assertMappingsEqual(putMappingRequest.source(), parsedPutMappingRequest.source()); + } + + private void assertMappingsEqual(String expected, String actual) throws IOException { + + XContentParser expectedJson = createParser(XContentType.JSON.xContent(), expected); + XContentParser actualJson = createParser(XContentType.JSON.xContent(), actual); + assertEquals(expectedJson.mapOrdered(), actualJson.mapOrdered()); + } + + /** + * Returns a random {@link PutMappingRequest}. + */ + private static PutMappingRequest createTestItem() throws IOException { + String index = randomAlphaOfLength(5); + + PutMappingRequest request = new PutMappingRequest(index); + + String type = randomAlphaOfLength(5); + request.type(type); + request.source(randomMapping()); + + return request; + } + + private static XContentBuilder randomMapping() throws IOException { + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + + if (randomBoolean()) { + CreateIndexRequestTests.randomMappingFields(builder, true); + } + + builder.endObject(); + return builder; + } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingResponseTests.java new file mode 100644 index 0000000000000..a52969c628106 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingResponseTests.java @@ -0,0 +1,85 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.mapping.put; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; + +public class PutMappingResponseTests extends ESTestCase { + + public void testToXContent() { + PutMappingResponse response = new PutMappingResponse(true); + String output = Strings.toString(response); + assertEquals("{\"acknowledged\":true}", output); + } + + public void testToAndFromXContent() throws IOException { + doFromXContentTestWithRandomFields(false); + } + + /** + * This test adds random fields and objects to the xContent rendered out to + * ensure we can parse it back to be forward compatible with additions to + * the xContent + */ + public void testFromXContentWithRandomFields() throws IOException { + doFromXContentTestWithRandomFields(true); + } + + private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws IOException { + + final PutMappingResponse putMappingResponse = createTestItem(); + + boolean humanReadable = randomBoolean(); + final XContentType xContentType = randomFrom(XContentType.values()); + BytesReference originalBytes = toShuffledXContent(putMappingResponse, xContentType, ToXContent.EMPTY_PARAMS, humanReadable); + + BytesReference mutated; + if (addRandomFields) { + mutated = insertRandomFields(xContentType, originalBytes, null, random()); + } else { + mutated = originalBytes; + } + PutMappingResponse parsedPutMappingResponse; + try (XContentParser parser = createParser(xContentType.xContent(), mutated)) { + parsedPutMappingResponse = PutMappingResponse.fromXContent(parser); + assertNull(parser.nextToken()); + } + + assertEquals(putMappingResponse.isAcknowledged(), parsedPutMappingResponse.isAcknowledged()); + } + + /** + * Returns a random {@link PutMappingResponse}. 
+ */ + private static PutMappingResponse createTestItem() throws IOException { + boolean acknowledged = randomBoolean(); + + return new PutMappingResponse(acknowledged); + } +} diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsTests.java index be84a8880641f..26785d2c8706c 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsTests.java @@ -20,8 +20,8 @@ package org.elasticsearch.action.admin.indices.stats; import org.elasticsearch.action.ActionFuture; -import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; @@ -31,7 +31,6 @@ import org.elasticsearch.index.engine.SegmentsStats; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.test.ESSingleNodeTestCase; -import org.elasticsearch.test.junit.annotations.TestLogging; import java.util.List; import java.util.concurrent.TimeUnit; @@ -158,7 +157,7 @@ public void testRefreshListeners() throws Exception { * Gives access to package private IndicesStatsResponse constructor for test purpose. **/ public static IndicesStatsResponse newIndicesStatsResponse(ShardStats[] shards, int totalShards, int successfulShards, - int failedShards, List shardFailures) { + int failedShards, List shardFailures) { return new IndicesStatsResponse(shards, totalShards, successfulShards, failedShards, shardFailures); } diff --git a/server/src/test/java/org/elasticsearch/action/get/MultiGetRequestTests.java b/server/src/test/java/org/elasticsearch/action/get/MultiGetRequestTests.java index 73c77d0629295..8834ee203fba0 100644 --- a/server/src/test/java/org/elasticsearch/action/get/MultiGetRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/get/MultiGetRequestTests.java @@ -20,15 +20,21 @@ package org.elasticsearch.action.get; import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.VersionType; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.test.ESTestCase; import java.io.IOException; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; public class MultiGetRequestTests extends ESTestCase { @@ -129,4 +135,59 @@ public void testAddWithValidSourceValueIsAccepted() throws Exception { assertEquals(2, multiGetRequest.getItems().size()); } + + public void testXContentSerialization() throws IOException { + for (int runs = 0; runs < 20; runs++) { + MultiGetRequest expected = createTestInstance(); + XContentType xContentType = randomFrom(XContentType.values()); + BytesReference shuffled = toShuffledXContent(expected, xContentType, ToXContent.EMPTY_PARAMS, false); + XContentParser parser = 
createParser(XContentFactory.xContent(xContentType), shuffled); + MultiGetRequest actual = new MultiGetRequest(); + actual.add(null, null, null, null, null, parser, true); + assertThat(parser.nextToken(), nullValue()); + + assertThat(actual.items.size(), equalTo(expected.items.size())); + for (int i = 0; i < expected.items.size(); i++) { + MultiGetRequest.Item expectedItem = expected.items.get(i); + MultiGetRequest.Item actualItem = actual.items.get(i); + assertThat(actualItem, equalTo(expectedItem)); + } + } + } + + private MultiGetRequest createTestInstance() { + int numItems = randomIntBetween(0, 128); + MultiGetRequest request = new MultiGetRequest(); + for (int i = 0; i < numItems; i++) { + MultiGetRequest.Item item = new MultiGetRequest.Item(randomAlphaOfLength(4), randomAlphaOfLength(4), randomAlphaOfLength(4)); + if (randomBoolean()) { + item.version(randomNonNegativeLong()); + } + if (randomBoolean()) { + item.versionType(randomFrom(VersionType.values())); + } + if (randomBoolean()) { + FetchSourceContext fetchSourceContext; + if (randomBoolean()) { + fetchSourceContext = new FetchSourceContext(true, generateRandomStringArray(16, 8, false), + generateRandomStringArray(5, 4, false)); + } else { + fetchSourceContext = new FetchSourceContext(false); + } + item.fetchSourceContext(fetchSourceContext); + } + if (randomBoolean()) { + item.storedFields(generateRandomStringArray(16, 8, false)); + } + if (randomBoolean()) { + item.routing(randomAlphaOfLength(4)); + } + if (randomBoolean()) { + item.parent(randomAlphaOfLength(4)); + } + request.add(item); + } + return request; + } + } diff --git a/server/src/test/java/org/elasticsearch/action/get/MultiGetResponseTests.java b/server/src/test/java/org/elasticsearch/action/get/MultiGetResponseTests.java new file mode 100644 index 0000000000000..82638870eef58 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/get/MultiGetResponseTests.java @@ -0,0 +1,83 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.action.get; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.get.GetResult; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +public class MultiGetResponseTests extends ESTestCase { + + public void testFromXContent() throws IOException { + for (int runs = 0; runs < 20; runs++) { + MultiGetResponse expected = createTestInstance(); + XContentType xContentType = randomFrom(XContentType.values()); + BytesReference shuffled = toShuffledXContent(expected, xContentType, ToXContent.EMPTY_PARAMS, false); + + XContentParser parser = createParser(XContentFactory.xContent(xContentType), shuffled); + MultiGetResponse parsed = MultiGetResponse.fromXContent(parser); + assertNull(parser.nextToken()); + assertNotSame(expected, parsed); + + assertThat(parsed.getResponses().length, equalTo(expected.getResponses().length)); + for (int i = 0; i < expected.getResponses().length; i++) { + MultiGetItemResponse expectedItem = expected.getResponses()[i]; + MultiGetItemResponse actualItem = parsed.getResponses()[i]; + assertThat(actualItem.getIndex(), equalTo(expectedItem.getIndex())); + assertThat(actualItem.getType(), equalTo(expectedItem.getType())); + assertThat(actualItem.getId(), equalTo(expectedItem.getId())); + if (expectedItem.isFailed()) { + assertThat(actualItem.isFailed(), is(true)); + assertThat(actualItem.getFailure().getMessage(), containsString(expectedItem.getFailure().getMessage())); + } else { + assertThat(actualItem.isFailed(), is(false)); + assertThat(actualItem.getResponse(), equalTo(expectedItem.getResponse())); + } + } + } + } + + private static MultiGetResponse createTestInstance() { + MultiGetItemResponse[] items = new MultiGetItemResponse[randomIntBetween(0, 128)]; + for (int i = 0; i < items.length; i++) { + if (randomBoolean()) { + items[i] = new MultiGetItemResponse(new GetResponse(new GetResult( + randomAlphaOfLength(4), randomAlphaOfLength(4), randomAlphaOfLength(4), randomNonNegativeLong(), + true, null, null + )), null); + } else { + items[i] = new MultiGetItemResponse(null, new MultiGetResponse.Failure(randomAlphaOfLength(4), + randomAlphaOfLength(4), randomAlphaOfLength(4), new RuntimeException(randomAlphaOfLength(4)))); + } + } + return new MultiGetResponse(items); + } + +} diff --git a/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java b/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java index 470da323043ae..6a7d443553888 100644 --- a/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java @@ -22,8 +22,8 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.IndicesRequest; -import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; +import 
org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.broadcast.BroadcastRequest; import org.elasticsearch.action.support.broadcast.BroadcastResponse; @@ -109,7 +109,7 @@ public static class Response extends BroadcastResponse { public Response() { } - public Response(int totalShards, int successfulShards, int failedShards, List shardFailures) { + public Response(int totalShards, int successfulShards, int failedShards, List shardFailures) { super(totalShards, successfulShards, failedShards, shardFailures); } } @@ -127,7 +127,7 @@ protected EmptyResult readShardResult(StreamInput in) throws IOException { } @Override - protected Response newResponse(Request request, int totalShards, int successfulShards, int failedShards, List emptyResults, List shardFailures, ClusterState clusterState) { + protected Response newResponse(Request request, int totalShards, int successfulShards, int failedShards, List emptyResults, List shardFailures, ClusterState clusterState) { return new Response(totalShards, successfulShards, failedShards, shardFailures); } diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java index 3aeab0fa5fb5b..15d7f6d7c5992 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java @@ -21,12 +21,12 @@ import org.apache.lucene.util.IOUtils; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.NoShardAvailableActionException; -import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.admin.indices.flush.TransportFlushAction; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastRequest; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.ClusterState; @@ -221,7 +221,7 @@ protected BasicReplicationRequest newShardRequest(DummyBroadcastRequest request, @Override protected BroadcastResponse newResponse(int successfulShards, int failedShards, int totalNumCopies, - List shardFailures) { + List shardFailures) { return new BroadcastResponse(totalNumCopies, successfulShards, failedShards, shardFailures); } diff --git a/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java index 7049d0fa9e98e..36266026504a9 100644 --- a/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java @@ -639,4 +639,16 @@ public void testUpdateScript() throws Exception { assertThat(result.action(), instanceOf(UpdateResponse.class)); assertThat(result.getResponseResult(), equalTo(DocWriteResponse.Result.NOOP)); } + + public void testToString() throws IOException { + UpdateRequest request = new UpdateRequest("test", "type1", "1") + 
.script(mockInlineScript("ctx._source.body = \"foo\"")); + assertThat(request.toString(), equalTo("update {[test][type1][1], doc_as_upsert[false], " + + "script[Script{type=inline, lang='mock', idOrCode='ctx._source.body = \"foo\"', options={}, params={}}], " + + "scripted_upsert[false], detect_noop[true]}")); + request = new UpdateRequest("test", "type1", "1").fromXContent( + createParser(JsonXContent.jsonXContent, new BytesArray("{\"doc\": {\"body\": \"bar\"}}"))); + assertThat(request.toString(), equalTo("update {[test][type1][1], doc_as_upsert[false], " + + "doc[index {[null][null][null], source[{\"body\":\"bar\"}]}], scripted_upsert[false], detect_noop[true]}")); + } } diff --git a/server/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java b/server/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java index 91a41495a461a..d887387d43fe9 100644 --- a/server/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java @@ -21,11 +21,14 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -34,7 +37,9 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; +import java.util.HashSet; import java.util.List; +import java.util.Set; import static org.hamcrest.Matchers.equalTo; @@ -156,5 +161,58 @@ public void testInvalidIPFilterClusterSettings() { .execute().actionGet()); assertEquals("invalid IP address [192.168.1.1.] 
for [" + filterSetting.getKey() + ipKey + "]", e.getMessage()); } + + public void testTransientSettingsStillApplied() throws Exception { + List nodes = internalCluster().startNodes(6); + Set excludeNodes = new HashSet<>(nodes.subList(0, 3)); + Set includeNodes = new HashSet<>(nodes.subList(3, 6)); + logger.info("--> exclude: [{}], include: [{}]", + Strings.collectionToCommaDelimitedString(excludeNodes), + Strings.collectionToCommaDelimitedString(includeNodes)); + ensureStableCluster(6); + client().admin().indices().prepareCreate("test").get(); + ensureGreen("test"); + + Settings exclude = Settings.builder().put("cluster.routing.allocation.exclude._name", + Strings.collectionToCommaDelimitedString(excludeNodes)).build(); + + logger.info("--> updating settings"); + client().admin().cluster().prepareUpdateSettings().setTransientSettings(exclude).get(); + + logger.info("--> waiting for relocation"); + waitForRelocation(ClusterHealthStatus.GREEN); + + ClusterState state = client().admin().cluster().prepareState().get().getState(); + + for (ShardRouting shard : state.getRoutingTable().shardsWithState(ShardRoutingState.STARTED)) { + String node = state.getRoutingNodes().node(shard.currentNodeId()).node().getName(); + logger.info("--> shard on {} - {}", node, shard); + assertTrue("shard on " + node + " but should only be on the include node list: " + + Strings.collectionToCommaDelimitedString(includeNodes), + includeNodes.contains(node)); + } + + Settings other = Settings.builder().put("cluster.info.update.interval", "45s").build(); + + logger.info("--> updating settings with random persistent setting"); + client().admin().cluster().prepareUpdateSettings() + .setPersistentSettings(other).setTransientSettings(exclude).get(); + + logger.info("--> waiting for relocation"); + waitForRelocation(ClusterHealthStatus.GREEN); + + state = client().admin().cluster().prepareState().get().getState(); + + // The transient settings still exist in the state + assertThat(state.metaData().transientSettings(), equalTo(exclude)); + + for (ShardRouting shard : state.getRoutingTable().shardsWithState(ShardRoutingState.STARTED)) { + String node = state.getRoutingNodes().node(shard.currentNodeId()).node().getName(); + logger.info("--> shard on {} - {}", node, shard); + assertTrue("shard on " + node + " but should only be on the include node list: " + + Strings.collectionToCommaDelimitedString(includeNodes), + includeNodes.contains(node)); + } + } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java index f44d0b7c4036e..a315cdc820678 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java @@ -249,7 +249,7 @@ public void testRequestStateOpen() throws Exception { @SuppressWarnings("unchecked") public void testIndexRemovalOnFailure() throws Exception { - doThrow(new RuntimeException("oops")).when(mapper).merge(anyMap(), anyObject(), anyBoolean()); + doThrow(new RuntimeException("oops")).when(mapper).merge(anyMap(), anyObject()); expectThrows(RuntimeException.class, this::executeTask); @@ -333,7 +333,7 @@ private void addMatchingTemplate(MetaDataBuilderConfigurator configurator) throw @SuppressWarnings("unchecked") private Map> getMappingsFromResponse() { final ArgumentCaptor argument = ArgumentCaptor.forClass(Map.class); - verify(mapper).merge(argument.capture(), 
anyObject(), anyBoolean()); + verify(mapper).merge(argument.capture(), anyObject()); return argument.getValue(); } diff --git a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java index 9200e04c7127a..6bfb78a2ade9c 100644 --- a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java @@ -140,15 +140,14 @@ public void testDeltas() { DiscoveryNodes.Delta delta = discoNodesB.delta(discoNodesA); - if (masterB == null || Objects.equals(masterAId, masterBId)) { + if (Objects.equals(masterAId, masterBId)) { assertFalse(delta.masterNodeChanged()); assertThat(delta.previousMasterNode(), nullValue()); assertThat(delta.newMasterNode(), nullValue()); } else { assertTrue(delta.masterNodeChanged()); - assertThat(delta.newMasterNode().getId(), equalTo(masterBId)); - assertThat(delta.previousMasterNode() != null ? delta.previousMasterNode().getId() : null, - equalTo(masterAId)); + assertThat(delta.newMasterNode() != null ? delta.newMasterNode().getId() : null, equalTo(masterBId)); + assertThat(delta.previousMasterNode() != null ? delta.previousMasterNode().getId() : null, equalTo(masterAId)); } Set newNodes = new HashSet<>(nodesB); diff --git a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java index 29c7a2b161403..0f4d0cf66346a 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java @@ -261,6 +261,21 @@ public void testAddConsumerAffixMap() { assertEquals(2, listResults.size()); assertEquals(2, intResults.size()); + service.applySettings(Settings.builder() + .put("foo.test.bar", 2) + .put("foo.test_1.bar", 7) + .putList("foo.test_list.list", "16", "17") + .putList("foo.test_list_1.list", "18", "19", "20") + .build()); + + assertEquals(2, intResults.get("test").intValue()); + assertEquals(7, intResults.get("test_1").intValue()); + assertEquals(Arrays.asList(16, 17), listResults.get("test_list")); + assertEquals(Arrays.asList(18, 19, 20), listResults.get("test_list_1")); + assertEquals(2, listResults.size()); + assertEquals(2, intResults.size()); + + listResults.clear(); intResults.clear(); @@ -286,6 +301,35 @@ public void testAddConsumerAffixMap() { } + public void testAffixMapConsumerNotCalledWithNull() { + Setting.AffixSetting prefixSetting = Setting.prefixKeySetting("eggplant.", + (k) -> Setting.intSetting(k, 1, Property.Dynamic, Property.NodeScope)); + Setting.AffixSetting otherSetting = Setting.prefixKeySetting("other.", + (k) -> Setting.intSetting(k, 1, Property.Dynamic, Property.NodeScope)); + AbstractScopedSettings service = new ClusterSettings(Settings.EMPTY,new HashSet<>(Arrays.asList(prefixSetting, otherSetting))); + Map affixResults = new HashMap<>(); + + Consumer> consumer = (map) -> { + logger.info("--> consuming settings {}", map); + affixResults.clear(); + affixResults.putAll(map); + }; + service.addAffixMapUpdateConsumer(prefixSetting, consumer, (s, k) -> {}, randomBoolean()); + assertEquals(0, affixResults.size()); + service.applySettings(Settings.builder() + .put("eggplant._name", 2) + .build()); + assertThat(affixResults.size(), equalTo(1)); + assertThat(affixResults.get("_name"), equalTo(2)); + + service.applySettings(Settings.builder() + 
.put("eggplant._name", 2) + .put("other.thing", 3) + .build()); + + assertThat(affixResults.get("_name"), equalTo(2)); + } + public void testApply() { Setting testSetting = Setting.intSetting("foo.bar", 1, Property.Dynamic, Property.NodeScope); Setting testSetting2 = Setting.intSetting("foo.bar.baz", 1, Property.Dynamic, Property.NodeScope); diff --git a/server/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java b/server/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java index 8c192a2a35091..2ca8189a972fd 100644 --- a/server/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java @@ -25,16 +25,21 @@ import org.apache.lucene.util.Counter; import org.elasticsearch.test.ESTestCase; +import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.SortedSet; import java.util.TreeSet; +import static java.util.Collections.emptyMap; import static org.elasticsearch.common.util.CollectionUtils.eagerPartition; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -176,4 +181,15 @@ public void testPerfectPartition() { eagerPartition(Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), 6) ); } + + public void testEnsureNoSelfReferences() { + CollectionUtils.ensureNoSelfReferences(emptyMap()); + CollectionUtils.ensureNoSelfReferences(null); + + Map map = new HashMap<>(); + map.put("field", map); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> CollectionUtils.ensureNoSelfReferences(map)); + assertThat(e.getMessage(), containsString("Iterable object is self-referencing itself")); + } } diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java b/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java index e368163a4e95c..c7205b3200f1c 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java +++ b/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java @@ -35,6 +35,7 @@ import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.unit.DistanceUnit; +import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matcher; @@ -854,19 +855,19 @@ public void testEnsureNotNull() { } public void testEnsureNoSelfReferences() throws IOException { - XContentBuilder.ensureNoSelfReferences(emptyMap()); - XContentBuilder.ensureNoSelfReferences(null); + CollectionUtils.ensureNoSelfReferences(emptyMap()); + CollectionUtils.ensureNoSelfReferences(null); Map map = new HashMap<>(); map.put("field", map); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> builder().map(map)); - assertThat(e.getMessage(), containsString("Object has already been built and is self-referencing itself")); + assertThat(e.getMessage(), containsString("Iterable object is self-referencing itself")); } /** * Test that the same map written multiple times do not trigger the self-reference check in - * {@link XContentBuilder#ensureNoSelfReferences(Object)} + * {@link 
CollectionUtils#ensureNoSelfReferences(Object)} */ public void testRepeatedMapsAndNoSelfReferences() throws Exception { Map mapB = singletonMap("b", "B"); @@ -899,7 +900,7 @@ public void testSelfReferencingMapsOneLevel() throws IOException { map1.put("map0", map0); // map 1 -> map 0 loop IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> builder().map(map0)); - assertThat(e.getMessage(), containsString("Object has already been built and is self-referencing itself")); + assertThat(e.getMessage(), containsString("Iterable object is self-referencing itself")); } public void testSelfReferencingMapsTwoLevels() throws IOException { @@ -917,7 +918,7 @@ public void testSelfReferencingMapsTwoLevels() throws IOException { map2.put("map0", map0); // map 2 -> map 0 loop IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> builder().map(map0)); - assertThat(e.getMessage(), containsString("Object has already been built and is self-referencing itself")); + assertThat(e.getMessage(), containsString("Iterable object is self-referencing itself")); } public void testSelfReferencingObjectsArray() throws IOException { @@ -930,13 +931,13 @@ public void testSelfReferencingObjectsArray() throws IOException { .startObject() .field("field", values) .endObject()); - assertThat(e.getMessage(), containsString("Object has already been built and is self-referencing itself")); + assertThat(e.getMessage(), containsString("Iterable object is self-referencing itself")); e = expectThrows(IllegalArgumentException.class, () -> builder() .startObject() .array("field", values) .endObject()); - assertThat(e.getMessage(), containsString("Object has already been built and is self-referencing itself")); + assertThat(e.getMessage(), containsString("Iterable object is self-referencing itself")); } public void testSelfReferencingIterable() throws IOException { @@ -949,7 +950,7 @@ public void testSelfReferencingIterable() throws IOException { .startObject() .field("field", (Iterable) values) .endObject()); - assertThat(e.getMessage(), containsString("Object has already been built and is self-referencing itself")); + assertThat(e.getMessage(), containsString("Iterable object is self-referencing itself")); } public void testSelfReferencingIterableOneLevel() throws IOException { @@ -964,7 +965,7 @@ public void testSelfReferencingIterableOneLevel() throws IOException { .startObject() .field("field", (Iterable) values) .endObject()); - assertThat(e.getMessage(), containsString("Object has already been built and is self-referencing itself")); + assertThat(e.getMessage(), containsString("Iterable object is self-referencing itself")); } public void testSelfReferencingIterableTwoLevels() throws IOException { @@ -984,7 +985,7 @@ public void testSelfReferencingIterableTwoLevels() throws IOException { map2.put("map0", map0); // map 2 -> map 0 loop IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> builder().map(map0)); - assertThat(e.getMessage(), containsString("Object has already been built and is self-referencing itself")); + assertThat(e.getMessage(), containsString("Iterable object is self-referencing itself")); } public void testChecksForDuplicates() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/get/GetActionIT.java b/server/src/test/java/org/elasticsearch/get/GetActionIT.java index d468d58212d16..911e26528c9ad 100644 --- a/server/src/test/java/org/elasticsearch/get/GetActionIT.java +++ 
b/server/src/test/java/org/elasticsearch/get/GetActionIT.java @@ -21,7 +21,6 @@ import org.elasticsearch.Version; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.delete.DeleteResponse; @@ -30,6 +29,7 @@ import org.elasticsearch.action.get.MultiGetRequest; import org.elasticsearch.action.get.MultiGetRequestBuilder; import org.elasticsearch.action.get.MultiGetResponse; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; @@ -748,7 +748,7 @@ public void testGetFieldsComplexField() throws Exception { if (flushResponse.getSuccessfulShards() == 0) { StringBuilder sb = new StringBuilder("failed to flush at least one shard. total shards [") .append(flushResponse.getTotalShards()).append("], failed shards: [").append(flushResponse.getFailedShards()).append("]"); - for (ShardOperationFailedException failure: flushResponse.getShardFailures()) { + for (DefaultShardOperationFailedException failure: flushResponse.getShardFailures()) { sb.append("\nShard failure: ").append(failure); } fail(sb.toString()); diff --git a/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java b/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java index e74cde52aa418..d4af783681029 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java @@ -54,7 +54,8 @@ public class CombinedDeletionPolicyTests extends ESTestCase { public void testKeepCommitsAfterGlobalCheckpoint() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(OPEN_INDEX_AND_TRANSLOG, translogPolicy, globalCheckpoint::get); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy( + OPEN_INDEX_AND_TRANSLOG, translogPolicy, globalCheckpoint::get, null); final LongArrayList maxSeqNoList = new LongArrayList(); final LongArrayList translogGenList = new LongArrayList(); @@ -93,7 +94,8 @@ public void testAcquireIndexCommit() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(); final UUID translogUUID = UUID.randomUUID(); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(OPEN_INDEX_AND_TRANSLOG, translogPolicy, globalCheckpoint::get); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy( + OPEN_INDEX_AND_TRANSLOG, translogPolicy, globalCheckpoint::get, null); long lastMaxSeqNo = between(1, 1000); long lastTranslogGen = between(1, 20); int safeIndex = 0; @@ -156,11 +158,12 @@ public void testLegacyIndex() throws Exception { final UUID translogUUID = UUID.randomUUID(); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(OPEN_INDEX_AND_TRANSLOG, translogPolicy, globalCheckpoint::get); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy( + OPEN_INDEX_AND_TRANSLOG, translogPolicy, globalCheckpoint::get, null); long 
legacyTranslogGen = randomNonNegativeLong(); IndexCommit legacyCommit = mockLegacyIndexCommit(translogUUID, legacyTranslogGen); - indexPolicy.onInit(singletonList(legacyCommit)); + indexPolicy.onCommit(singletonList(legacyCommit)); verify(legacyCommit, never()).delete(); assertThat(translogPolicy.getMinTranslogGenerationForRecovery(), equalTo(legacyTranslogGen)); assertThat(translogPolicy.getTranslogGenerationOfLastCommit(), equalTo(legacyTranslogGen)); @@ -188,7 +191,8 @@ public void testLegacyIndex() throws Exception { public void testDeleteInvalidCommits() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(randomNonNegativeLong()); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(OPEN_INDEX_CREATE_TRANSLOG, translogPolicy, globalCheckpoint::get); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy( + OPEN_INDEX_CREATE_TRANSLOG, translogPolicy, globalCheckpoint::get, null); final int invalidCommits = between(1, 10); final List commitList = new ArrayList<>(); @@ -211,6 +215,73 @@ public void testDeleteInvalidCommits() throws Exception { } } + /** + * Keeping existing unsafe commits can be problematic because these commits are not safe at recovery time + * but can suddenly become safe in the future. See {@link CombinedDeletionPolicy#keepOnlyStartingCommitOnInit(List)} + */ + public void testKeepOnlyStartingCommitOnInit() throws Exception { + final AtomicLong globalCheckpoint = new AtomicLong(randomNonNegativeLong()); + TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); + final UUID translogUUID = UUID.randomUUID(); + final List commitList = new ArrayList<>(); + int totalCommits = between(2, 20); + for (int i = 0; i < totalCommits; i++) { + commitList.add(mockIndexCommit(randomNonNegativeLong(), translogUUID, randomNonNegativeLong())); + } + final IndexCommit startingCommit = randomFrom(commitList); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy( + OPEN_INDEX_AND_TRANSLOG, translogPolicy, globalCheckpoint::get, startingCommit); + indexPolicy.onInit(commitList); + for (IndexCommit commit : commitList) { + if (commit.equals(startingCommit) == false) { + verify(commit, times(1)).delete(); + } + } + verify(startingCommit, never()).delete(); + assertThat(translogPolicy.getMinTranslogGenerationForRecovery(), + equalTo(Long.parseLong(startingCommit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY)))); + assertThat(translogPolicy.getTranslogGenerationOfLastCommit(), + equalTo(Long.parseLong(startingCommit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY)))); + } + + public void testCheckUnreferencedCommits() throws Exception { + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO); + final UUID translogUUID = UUID.randomUUID(); + final TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy( + OPEN_INDEX_AND_TRANSLOG, translogPolicy, globalCheckpoint::get, null); + final List commitList = new ArrayList<>(); + int totalCommits = between(2, 20); + long lastMaxSeqNo = between(1, 1000); + long lastTranslogGen = between(1, 50); + for (int i = 0; i < totalCommits; i++) { + lastMaxSeqNo += between(1, 10000); + lastTranslogGen += between(1, 100); + commitList.add(mockIndexCommit(lastMaxSeqNo, translogUUID, lastTranslogGen)); + } + IndexCommit safeCommit = randomFrom(commitList); + 
globalCheckpoint.set(Long.parseLong(safeCommit.getUserData().get(SequenceNumbers.MAX_SEQ_NO))); + indexPolicy.onCommit(commitList); + if (safeCommit == commitList.get(commitList.size() - 1)) { + // Safe commit is the last commit - no need to clean up + assertThat(translogPolicy.getMinTranslogGenerationForRecovery(), equalTo(lastTranslogGen)); + assertThat(translogPolicy.getTranslogGenerationOfLastCommit(), equalTo(lastTranslogGen)); + assertThat(indexPolicy.hasUnreferencedCommits(), equalTo(false)); + } else { + // Advanced but not enough + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), lastMaxSeqNo - 1)); + assertThat(indexPolicy.hasUnreferencedCommits(), equalTo(false)); + // Advanced enough + globalCheckpoint.set(randomLongBetween(lastMaxSeqNo, Long.MAX_VALUE)); + assertThat(indexPolicy.hasUnreferencedCommits(), equalTo(true)); + indexPolicy.onCommit(commitList); + // Safe commit is the last commit - no need to clean up + assertThat(translogPolicy.getMinTranslogGenerationForRecovery(), equalTo(lastTranslogGen)); + assertThat(translogPolicy.getTranslogGenerationOfLastCommit(), equalTo(lastTranslogGen)); + assertThat(indexPolicy.hasUnreferencedCommits(), equalTo(false)); + } + } + IndexCommit mockIndexCommit(long maxSeqNo, UUID translogUUID, long translogGen) throws IOException { final Map userData = new HashMap<>(); userData.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(maxSeqNo)); diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 518411e59e8cd..2a7e49aa66b61 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -163,6 +163,7 @@ import static org.elasticsearch.index.engine.Engine.Operation.Origin.REPLICA; import static org.elasticsearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; import static org.hamcrest.Matchers.greaterThan; @@ -4010,13 +4011,15 @@ public void testFillUpSequenceIdGapsOnRecovery() throws IOException { boolean flushed = false; + AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO); Engine recoveringEngine = null; try { assertEquals(docs - 1, engine.getLocalCheckpointTracker().getMaxSeqNo()); assertEquals(docs - 1, engine.getLocalCheckpointTracker().getCheckpoint()); assertEquals(maxSeqIDOnReplica, replicaEngine.getLocalCheckpointTracker().getMaxSeqNo()); assertEquals(checkpointOnReplica, replicaEngine.getLocalCheckpointTracker().getCheckpoint()); - recoveringEngine = new InternalEngine(copy(replicaEngine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG)); + recoveringEngine = new InternalEngine(copy( + replicaEngine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG, globalCheckpoint::get)); assertEquals(numDocsOnReplica, recoveringEngine.getTranslog().uncommittedOperations()); recoveringEngine.recoverFromTranslog(); assertEquals(maxSeqIDOnReplica, recoveringEngine.getLocalCheckpointTracker().getMaxSeqNo()); @@ -4038,6 +4041,8 @@ public void testFillUpSequenceIdGapsOnRecovery() throws IOException { assertEquals(maxSeqIDOnReplica, recoveringEngine.getLocalCheckpointTracker().getMaxSeqNo()); assertEquals(maxSeqIDOnReplica, 
recoveringEngine.getLocalCheckpointTracker().getCheckpoint()); if ((flushed = randomBoolean())) { + globalCheckpoint.set(recoveringEngine.getLocalCheckpointTracker().getMaxSeqNo()); + recoveringEngine.getTranslog().sync(); recoveringEngine.flush(true, true); } } @@ -4047,7 +4052,8 @@ public void testFillUpSequenceIdGapsOnRecovery() throws IOException { // now do it again to make sure we preserve values etc. try { - recoveringEngine = new InternalEngine(copy(replicaEngine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG)); + recoveringEngine = new InternalEngine( + copy(replicaEngine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG, globalCheckpoint::get)); if (flushed) { assertEquals(0, recoveringEngine.getTranslog().uncommittedOperations()); } @@ -4355,4 +4361,82 @@ public void testAcquireIndexCommit() throws Exception { assertThat(DirectoryReader.listCommits(engine.store.directory()), hasSize(1)); } } + + public void testOpenIndexAndTranslogKeepOnlySafeCommit() throws Exception { + IOUtils.close(engine); + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO); + final EngineConfig config = copy(engine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG, globalCheckpoint::get); + final IndexCommit safeCommit; + try (InternalEngine engine = new InternalEngine(copy(config, EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG))) { + final int numDocs = between(5, 50); + for (int i = 0; i < numDocs; i++) { + index(engine, i); + if (randomBoolean()) { + engine.flush(); + } + } + // Select a starting commit, then advance and persist the global checkpoint to that commit. + final List commits = DirectoryReader.listCommits(engine.store.directory()); + safeCommit = randomFrom(commits); + globalCheckpoint.set(Long.parseLong(safeCommit.getUserData().get(SequenceNumbers.MAX_SEQ_NO))); + engine.getTranslog().sync(); + } + try (InternalEngine engine = new InternalEngine(copy(config, EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG))) { + final List existingCommits = DirectoryReader.listCommits(engine.store.directory()); + assertThat("OPEN_INDEX_AND_TRANSLOG should keep only safe commit", existingCommits, contains(safeCommit)); + } + } + + public void testOpenIndexCreateTranslogKeepOnlyLastCommit() throws Exception { + IOUtils.close(engine); + final EngineConfig config = copy(engine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG); + final Map lastCommit; + try (InternalEngine engine = new InternalEngine(copy(config, EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG))) { + engine.skipTranslogRecovery(); + final int numDocs = between(5, 50); + for (int i = 0; i < numDocs; i++) { + index(engine, i); + if (randomBoolean()) { + engine.flush(); + } + } + final List commits = DirectoryReader.listCommits(engine.store.directory()); + lastCommit = commits.get(commits.size() - 1).getUserData(); + } + try (InternalEngine engine = new InternalEngine(copy(config, EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG))) { + final List existingCommits = DirectoryReader.listCommits(engine.store.directory()); + assertThat("OPEN_INDEX_CREATE_TRANSLOG should keep only last commit", existingCommits, hasSize(1)); + final Map userData = existingCommits.get(0).getUserData(); + assertThat(userData.get(SequenceNumbers.MAX_SEQ_NO), equalTo(lastCommit.get(SequenceNumbers.MAX_SEQ_NO))); + assertThat(userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY), equalTo(lastCommit.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY))); + // The translog UUID and generation should be freshly created. 
+ assertThat(userData.get(Translog.TRANSLOG_UUID_KEY), not(equalTo(lastCommit.get(Translog.TRANSLOG_UUID_KEY)))); + assertThat(userData.get(Translog.TRANSLOG_GENERATION_KEY), equalTo("1")); + } + } + + public void testCleanUpCommitsWhenGlobalCheckpointAdvanced() throws Exception { + IOUtils.close(engine, store); + store = createStore(); + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO); + try (InternalEngine engine = createEngine(store, createTempDir(), globalCheckpoint::get)) { + final int numDocs = scaledRandomIntBetween(10, 100); + for (int docId = 0; docId < numDocs; docId++) { + index(engine, docId); + if (frequently()) { + engine.flush(randomBoolean(), randomBoolean()); + } + } + engine.flush(false, randomBoolean()); + List commits = DirectoryReader.listCommits(store.directory()); + // Global checkpoint advanced but not enough - all commits are kept. + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpointTracker().getCheckpoint() - 1)); + engine.syncTranslog(); + assertThat(DirectoryReader.listCommits(store.directory()), equalTo(commits)); + // Global checkpoint advanced enough - only the last commit is kept. + globalCheckpoint.set(randomLongBetween(engine.getLocalCheckpointTracker().getCheckpoint(), Long.MAX_VALUE)); + engine.syncTranslog(); + assertThat(DirectoryReader.listCommits(store.directory()), contains(commits.get(commits.size() - 1))); + } + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/AllFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/AllFieldMapperTests.java index 4ccc8bc215fd7..b33d98c9b0015 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/AllFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/AllFieldMapperTests.java @@ -33,7 +33,7 @@ public void testUpdateDefaultSearchAnalyzer() throws Exception { .put("index.analysis.analyzer.default_search.type", "custom") .put("index.analysis.analyzer.default_search.tokenizer", "standard").build()); String mapping = XContentFactory.jsonBuilder().startObject().startObject("_doc").endObject().endObject().string(); - indexService.mapperService().merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false); + indexService.mapperService().merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); assertEquals(mapping, indexService.mapperService().documentMapper("_doc").mapping().toString()); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java index e6a1c0a69d81a..bb839d8e57361 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java @@ -211,7 +211,7 @@ public void testMultiFields() throws IOException { .endObject().endObject() .endObject().endObject().string(); DocumentMapper mapper = indexService.mapperService() - .merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, false); + .merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); assertEquals(mapping, mapper.mappingSource().toString()); BytesReference source = XContentFactory.jsonBuilder() .startObject() diff --git a/server/src/test/java/org/elasticsearch/index/mapper/CopyToMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/CopyToMapperTests.java 
index a0b6a1458e24f..b4c698fa26d0b 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/CopyToMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/CopyToMapperTests.java @@ -306,11 +306,11 @@ public void testCopyToFieldMerge() throws Exception { .endObject().endObject().endObject().string(); MapperService mapperService = createIndex("test").mapperService(); - DocumentMapper docMapperBefore = mapperService.merge("type1", new CompressedXContent(mappingBefore), MapperService.MergeReason.MAPPING_UPDATE, false); + DocumentMapper docMapperBefore = mapperService.merge("type1", new CompressedXContent(mappingBefore), MapperService.MergeReason.MAPPING_UPDATE); assertEquals(Arrays.asList("foo", "bar"), docMapperBefore.mappers().getMapper("copy_test").copyTo().copyToFields()); - DocumentMapper docMapperAfter = mapperService.merge("type1", new CompressedXContent(mappingAfter), MapperService.MergeReason.MAPPING_UPDATE, false); + DocumentMapper docMapperAfter = mapperService.merge("type1", new CompressedXContent(mappingAfter), MapperService.MergeReason.MAPPING_UPDATE); assertEquals(Arrays.asList("baz", "bar"), docMapperAfter.mappers().getMapper("copy_test").copyTo().copyToFields()); assertEquals(Arrays.asList("foo", "bar"), docMapperBefore.mappers().getMapper("copy_test").copyTo().copyToFields()); @@ -438,7 +438,7 @@ public void testCopyToChildNested() throws Exception { .endObject(); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> indexService.mapperService().merge("_doc", new CompressedXContent(rootToNestedMapping.bytes()), - MergeReason.MAPPING_UPDATE, false)); + MergeReason.MAPPING_UPDATE)); assertThat(e.getMessage(), Matchers.startsWith("Illegal combination of [copy_to] and [nested] mappings")); XContentBuilder nestedToNestedMapping = jsonBuilder().startObject() @@ -466,7 +466,7 @@ public void testCopyToChildNested() throws Exception { .endObject(); e = expectThrows(IllegalArgumentException.class, () -> indexService.mapperService().merge("_doc", new CompressedXContent(nestedToNestedMapping.bytes()), - MergeReason.MAPPING_UPDATE, false)); + MergeReason.MAPPING_UPDATE)); } public void testCopyToSiblingNested() throws Exception { @@ -496,7 +496,7 @@ public void testCopyToSiblingNested() throws Exception { .endObject(); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> indexService.mapperService().merge("_doc", new CompressedXContent(rootToNestedMapping.bytes()), - MergeReason.MAPPING_UPDATE, false)); + MergeReason.MAPPING_UPDATE)); assertThat(e.getMessage(), Matchers.startsWith("Illegal combination of [copy_to] and [nested] mappings")); } @@ -517,7 +517,7 @@ public void testCopyToObject() throws Exception { .endObject(); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> indexService.mapperService().merge("_doc", new CompressedXContent(rootToNestedMapping.bytes()), - MergeReason.MAPPING_UPDATE, false)); + MergeReason.MAPPING_UPDATE)); assertThat(e.getMessage(), Matchers.startsWith("Cannot copy to field [target] since it is mapped as an object")); } @@ -585,7 +585,7 @@ public void testCopyToMultiField() throws Exception { MapperService mapperService = createIndex("test").mapperService(); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> mapperService.merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, randomBoolean())); + () -> mapperService.merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE)); 
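The hunks in this file, like most of the mapper-test hunks that follow, repeat one mechanical change: the trailing updateAllTypes boolean is dropped from the merge methods. A minimal sketch of the new call shape, using the same Elasticsearch types these tests already import (the wrapper class and method name here are invented purely for illustration):

```java
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.MapperService.MergeReason;

final class MergeCallSketch {

    /** Post-change call shape: there is no updateAllTypes flag left to thread through. */
    static DocumentMapper mergeMapping(MapperService mapperService, String type, CompressedXContent mapping) {
        // Previously: mapperService.merge(type, mapping, MergeReason.MAPPING_UPDATE, false);
        return mapperService.merge(type, mapping, MergeReason.MAPPING_UPDATE);
    }
}
```

With indices limited to a single mapping type, a merge can only ever target that type, which is why the flag (and the tests' randomBoolean() values for it) can disappear without changing behaviour.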
assertEquals("[copy_to] may not be used to copy to a multi-field: [my_field.bar]", e.getMessage()); } @@ -608,7 +608,7 @@ public void testNestedCopyTo() throws Exception { .endObject().endObject().string(); MapperService mapperService = createIndex("test").mapperService(); - mapperService.merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, randomBoolean()); // no exception + mapperService.merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); // no exception } public void testNestedCopyToMultiField() throws Exception { @@ -633,7 +633,7 @@ public void testNestedCopyToMultiField() throws Exception { MapperService mapperService = createIndex("test").mapperService(); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> mapperService.merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, randomBoolean())); + () -> mapperService.merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE)); assertEquals("[copy_to] may not be used to copy to a multi-field: [n.my_field.bar]", e.getMessage()); } @@ -654,7 +654,7 @@ public void testCopyFromMultiField() throws Exception { MapperService mapperService = createIndex("test").mapperService(); MapperParsingException e = expectThrows(MapperParsingException.class, - () -> mapperService.merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, randomBoolean())); + () -> mapperService.merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE)); assertThat(e.getMessage(), Matchers.containsString("copy_to in multi fields is not allowed. Found the copy_to in field [bar] " + "which is within a multi field.")); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java index 5776e9d618e3b..23bcba4cda76b 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java @@ -380,7 +380,7 @@ public void testMergeDate() throws IOException { .startObject("release_date").field("type", "date").field("format", "yyyy/MM/dd").endObject() .endObject().endObject().endObject().string(); DocumentMapper initMapper = indexService.mapperService().merge("movie", new CompressedXContent(initMapping), - MapperService.MergeReason.MAPPING_UPDATE, randomBoolean()); + MapperService.MergeReason.MAPPING_UPDATE); assertThat(initMapper.mappers().getMapper("release_date"), notNullValue()); assertFalse(initMapper.mappers().getMapper("release_date").fieldType().stored()); @@ -392,7 +392,7 @@ public void testMergeDate() throws IOException { Exception e = expectThrows(IllegalArgumentException.class, () -> indexService.mapperService().merge("movie", new CompressedXContent(updateFormatMapping), - MapperService.MergeReason.MAPPING_UPDATE, randomBoolean())); + MapperService.MergeReason.MAPPING_UPDATE)); assertThat(e.getMessage(), containsString("[mapper [release_date] has different [format] values]")); } @@ -408,7 +408,7 @@ public void testMergeText() throws Exception { DocumentMapper update = indexService.mapperService().parse("_doc", new CompressedXContent(mappingUpdate), false); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> mapper.merge(update.mapping(), randomBoolean())); + () -> mapper.merge(update.mapping())); assertEquals("mapper [date] of different type, current_type [date], merged_type [text]", 
e.getMessage()); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperMergeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperMergeTests.java index 68389112bfd51..b528c2119cfe1 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperMergeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperMergeTests.java @@ -53,7 +53,7 @@ public void test1Merge() throws Exception { .endObject().endObject().endObject().string(); DocumentMapper stage2 = parser.parse("person", new CompressedXContent(stage2Mapping)); - DocumentMapper merged = stage1.merge(stage2.mapping(), false); + DocumentMapper merged = stage1.merge(stage2.mapping()); // stage1 mapping should not have been modified assertThat(stage1.mappers().smartNameFieldMapper("age"), nullValue()); assertThat(stage1.mappers().smartNameFieldMapper("obj1.prop1"), nullValue()); @@ -72,7 +72,7 @@ public void testMergeObjectDynamic() throws Exception { DocumentMapper withDynamicMapper = parser.parse("type1", new CompressedXContent(withDynamicMapping)); assertThat(withDynamicMapper.root().dynamic(), equalTo(ObjectMapper.Dynamic.FALSE)); - DocumentMapper merged = mapper.merge(withDynamicMapper.mapping(), false); + DocumentMapper merged = mapper.merge(withDynamicMapper.mapping()); assertThat(merged.root().dynamic(), equalTo(ObjectMapper.Dynamic.FALSE)); } @@ -88,14 +88,14 @@ public void testMergeObjectAndNested() throws Exception { DocumentMapper nestedMapper = parser.parse("type1", new CompressedXContent(nestedMapping)); try { - objectMapper.merge(nestedMapper.mapping(), false); + objectMapper.merge(nestedMapper.mapping()); fail(); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("object mapping [obj] can't be changed from non-nested to nested")); } try { - nestedMapper.merge(objectMapper.mapping(), false); + nestedMapper.merge(objectMapper.mapping()); fail(); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("object mapping [obj] can't be changed from nested to non-nested")); @@ -115,7 +115,7 @@ public void testMergeSearchAnalyzer() throws Exception { DocumentMapper changed = parser.parse("type", new CompressedXContent(mapping2)); assertThat(((NamedAnalyzer) existing.mappers().getMapper("field").fieldType().searchAnalyzer()).name(), equalTo("whitespace")); - DocumentMapper merged = existing.merge(changed.mapping(), false); + DocumentMapper merged = existing.merge(changed.mapping()); assertThat(((NamedAnalyzer) merged.mappers().getMapper("field").fieldType().searchAnalyzer()).name(), equalTo("keyword")); } @@ -129,8 +129,8 @@ public void testChangeSearchAnalyzerToDefault() throws Exception { .startObject("properties").startObject("field").field("type", "text").field("analyzer", "standard").endObject().endObject() .endObject().endObject().string(); - DocumentMapper existing = mapperService.merge("type", new CompressedXContent(mapping1), MapperService.MergeReason.MAPPING_UPDATE, false); - DocumentMapper merged = mapperService.merge("type", new CompressedXContent(mapping2), MapperService.MergeReason.MAPPING_UPDATE, false); + DocumentMapper existing = mapperService.merge("type", new CompressedXContent(mapping1), MapperService.MergeReason.MAPPING_UPDATE); + DocumentMapper merged = mapperService.merge("type", new CompressedXContent(mapping2), MapperService.MergeReason.MAPPING_UPDATE); assertThat(((NamedAnalyzer) existing.mappers().getMapper("field").fieldType().searchAnalyzer()).name(), 
equalTo("whitespace")); @@ -139,7 +139,7 @@ public void testChangeSearchAnalyzerToDefault() throws Exception { public void testConcurrentMergeTest() throws Throwable { final MapperService mapperService = createIndex("test").mapperService(); - mapperService.merge("test", new CompressedXContent("{\"test\":{}}"), MapperService.MergeReason.MAPPING_UPDATE, false); + mapperService.merge("test", new CompressedXContent("{\"test\":{}}"), MapperService.MergeReason.MAPPING_UPDATE); final DocumentMapper documentMapper = mapperService.documentMapper("test"); DocumentFieldMappers dfm = documentMapper.mappers(); @@ -169,7 +169,7 @@ public void run() { Mapping update = doc.dynamicMappingsUpdate(); assert update != null; lastIntroducedFieldName.set(fieldName); - mapperService.merge("test", new CompressedXContent(update.toString()), MapperService.MergeReason.MAPPING_UPDATE, false); + mapperService.merge("test", new CompressedXContent(update.toString()), MapperService.MergeReason.MAPPING_UPDATE); } } catch (Exception e) { error.set(e); @@ -213,7 +213,7 @@ public void testDoNotRepeatOriginalMapping() throws IOException { .endObject() .endObject().endObject().bytes()); MapperService mapperService = createIndex("test").mapperService(); - mapperService.merge("type", mapping, MapperService.MergeReason.MAPPING_UPDATE, false); + mapperService.merge("type", mapping, MapperService.MergeReason.MAPPING_UPDATE); CompressedXContent update = new CompressedXContent(XContentFactory.jsonBuilder().startObject() .startObject("type") @@ -223,7 +223,7 @@ public void testDoNotRepeatOriginalMapping() throws IOException { .endObject() .endObject() .endObject().endObject().bytes()); - DocumentMapper mapper = mapperService.merge("type", update, MapperService.MergeReason.MAPPING_UPDATE, false); + DocumentMapper mapper = mapperService.merge("type", update, MapperService.MergeReason.MAPPING_UPDATE); assertNotNull(mapper.mappers().getMapper("foo")); assertFalse(mapper.sourceMapper().enabled()); @@ -244,7 +244,7 @@ public void testMergeChildType() throws IOException { .startObject("name").field("type", "text").endObject() .endObject().endObject().endObject().string(); DocumentMapper updatedMapper1 = parser.parse("child", new CompressedXContent(updatedMapping1)); - DocumentMapper mergedMapper1 = initMapper.merge(updatedMapper1.mapping(), false); + DocumentMapper mergedMapper1 = initMapper.merge(updatedMapper1.mapping()); assertThat(mergedMapper1.mappers().getMapper("_parent#parent"), notNullValue()); assertThat(mergedMapper1.mappers().getMapper("name"), notNullValue()); @@ -255,7 +255,7 @@ public void testMergeChildType() throws IOException { .startObject("age").field("type", "byte").endObject() .endObject().endObject().endObject().string(); DocumentMapper updatedMapper2 = parser.parse("child", new CompressedXContent(updatedMapping2)); - DocumentMapper mergedMapper2 = mergedMapper1.merge(updatedMapper2.mapping(), false); + DocumentMapper mergedMapper2 = mergedMapper1.merge(updatedMapper2.mapping()); assertThat(mergedMapper2.mappers().getMapper("_parent#parent"), notNullValue()); assertThat(mergedMapper2.mappers().getMapper("name"), notNullValue()); @@ -265,7 +265,7 @@ public void testMergeChildType() throws IOException { .startObject("_parent").field("type", "new_parent").endObject() .endObject().endObject().string(); DocumentMapper modParentMapper = parser.parse("child", new CompressedXContent(modParentMapping)); - Exception e = expectThrows(IllegalArgumentException.class, () -> initMapper.merge(modParentMapper.mapping(), false)); + 
Exception e = expectThrows(IllegalArgumentException.class, () -> initMapper.merge(modParentMapper.mapping())); assertThat(e.getMessage(), containsString("The _parent field's type option can't be changed: [parent]->[new_parent]")); } @@ -286,7 +286,7 @@ public void testMergeAddingParent() throws IOException { .startObject("age").field("type", "byte").endObject() .endObject().endObject().endObject().string(); DocumentMapper updatedMapper = parser.parse("cowboy", new CompressedXContent(updatedMapping)); - Exception e = expectThrows(IllegalArgumentException.class, () -> initMapper.merge(updatedMapper.mapping(), false)); + Exception e = expectThrows(IllegalArgumentException.class, () -> initMapper.merge(updatedMapper.mapping())); assertThat(e.getMessage(), containsString("The _parent field's type option can't be changed: [null]->[parent]")); } @@ -317,7 +317,7 @@ public void testMergeMeta() throws IOException { .string(); DocumentMapper updatedMapper = parser.parse("test", new CompressedXContent(updateMapping)); - assertThat(initMapper.merge(updatedMapper.mapping(), true).meta().get("foo"), equalTo("bar")); + assertThat(initMapper.merge(updatedMapper.mapping()).meta().get("foo"), equalTo("bar")); updateMapping = XContentFactory.jsonBuilder() .startObject() @@ -330,6 +330,6 @@ public void testMergeMeta() throws IOException { .string(); updatedMapper = parser.parse("test", new CompressedXContent(updateMapping)); - assertThat(initMapper.merge(updatedMapper.mapping(), true).meta().get("foo"), equalTo("new_bar")); + assertThat(initMapper.merge(updatedMapper.mapping()).meta().get("foo"), equalTo("new_bar")); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java index b227833f3444d..fd61afc566efc 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java @@ -552,7 +552,7 @@ public void testMixTemplateMultiFieldAndMappingReuse() throws Exception { .endArray() .endObject().endObject(); indexService.mapperService().merge("_doc", new CompressedXContent(mappings1.bytes()), - MapperService.MergeReason.MAPPING_UPDATE, false); + MapperService.MergeReason.MAPPING_UPDATE); XContentBuilder json = XContentFactory.jsonBuilder().startObject() .field("field", "foo") @@ -564,7 +564,7 @@ public void testMixTemplateMultiFieldAndMappingReuse() throws Exception { assertNotNull(parsed.dynamicMappingsUpdate()); indexService.mapperService().merge("_doc", new CompressedXContent(parsed.dynamicMappingsUpdate().toString()), - MapperService.MergeReason.MAPPING_UPDATE, false); + MapperService.MergeReason.MAPPING_UPDATE); mapper = indexService.mapperService().documentMapper("_doc"); assertNotNull(mapper.mappers().getMapper("field.raw")); parsed = mapper.parse(source); @@ -591,7 +591,7 @@ public void testMixTemplateMultiFieldMultiTypeAndMappingReuse() throws Exception .endObject() .endArray() .endObject().endObject(); - indexService.mapperService().merge("type1", new CompressedXContent(mappings1.bytes()), MapperService.MergeReason.MAPPING_UPDATE, false); + indexService.mapperService().merge("type1", new CompressedXContent(mappings1.bytes()), MapperService.MergeReason.MAPPING_UPDATE); XContentBuilder mappings2 = jsonBuilder().startObject() .startObject("type2") .startObject("properties") @@ -600,7 +600,7 @@ public void testMixTemplateMultiFieldMultiTypeAndMappingReuse() throws Exception .endObject() 
.endObject() .endObject().endObject(); - indexService.mapperService().merge("type2", new CompressedXContent(mappings2.bytes()), MapperService.MergeReason.MAPPING_UPDATE, false); + indexService.mapperService().merge("type2", new CompressedXContent(mappings2.bytes()), MapperService.MergeReason.MAPPING_UPDATE); XContentBuilder json = XContentFactory.jsonBuilder().startObject() .field("field", "foo") @@ -611,7 +611,7 @@ public void testMixTemplateMultiFieldMultiTypeAndMappingReuse() throws Exception ParsedDocument parsed = mapper.parse(source); assertNotNull(parsed.dynamicMappingsUpdate()); - indexService.mapperService().merge("type1", new CompressedXContent(parsed.dynamicMappingsUpdate().toString()), MapperService.MergeReason.MAPPING_UPDATE, false); + indexService.mapperService().merge("type1", new CompressedXContent(parsed.dynamicMappingsUpdate().toString()), MapperService.MergeReason.MAPPING_UPDATE); mapper = indexService.mapperService().documentMapper("type1"); assertNotNull(mapper.mappers().getMapper("field.raw")); parsed = mapper.parse(source); @@ -624,7 +624,7 @@ public void testDefaultFloatingPointMappings() throws IOException { .startObject("type") .field("numeric_detection", true) .endObject().endObject().string(); - mapperService.merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, false); + mapperService.merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); DocumentMapper mapper = mapperService.documentMapper("type"); doTestDefaultFloatingPointMappings(mapper, XContentFactory.jsonBuilder()); doTestDefaultFloatingPointMappings(mapper, XContentFactory.yamlBuilder()); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java b/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java index 67c8435520aef..8c2e6d475414d 100755 --- a/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java @@ -199,7 +199,7 @@ protected void parseCreateField(ParseContext context, List field } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + protected void doMerge(Mapper mergeWith) { // ignore this for now } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FakeStringFieldMapper.java b/server/src/test/java/org/elasticsearch/index/mapper/FakeStringFieldMapper.java index 464b0d9f8406a..efb2023ee6919 100755 --- a/server/src/test/java/org/elasticsearch/index/mapper/FakeStringFieldMapper.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/FakeStringFieldMapper.java @@ -156,8 +156,8 @@ protected String contentType() { } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { - super.doMerge(mergeWith, updateAllTypes); + protected void doMerge(Mapper mergeWith) { + super.doMerge(mergeWith); } @Override diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java index 3655f04fcbba1..f075353736672 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java @@ -137,11 +137,11 @@ public void testMergingMappings() throws Exception { .endObject().endObject().string(); MapperService mapperService = createIndex("test").mapperService(); - DocumentMapper mapperEnabled = 
mapperService.merge("type", new CompressedXContent(enabledMapping), MapperService.MergeReason.MAPPING_UPDATE, false); - DocumentMapper mapperDisabled = mapperService.merge("type", new CompressedXContent(disabledMapping), MapperService.MergeReason.MAPPING_UPDATE, false); + DocumentMapper mapperEnabled = mapperService.merge("type", new CompressedXContent(enabledMapping), MapperService.MergeReason.MAPPING_UPDATE); + DocumentMapper mapperDisabled = mapperService.merge("type", new CompressedXContent(disabledMapping), MapperService.MergeReason.MAPPING_UPDATE); assertFalse(mapperDisabled.metadataMapper(FieldNamesFieldMapper.class).fieldType().isEnabled()); - mapperEnabled = mapperService.merge("type", new CompressedXContent(enabledMapping), MapperService.MergeReason.MAPPING_UPDATE, false); + mapperEnabled = mapperService.merge("type", new CompressedXContent(enabledMapping), MapperService.MergeReason.MAPPING_UPDATE); assertTrue(mapperEnabled.metadataMapper(FieldNamesFieldMapper.class).fieldType().isEnabled()); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java index fe885a46b87ef..39753548ee390 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java @@ -39,7 +39,6 @@ public class FieldTypeLookupTests extends ESTestCase { public void testEmpty() { FieldTypeLookup lookup = new FieldTypeLookup(); assertNull(lookup.get("foo")); - assertEquals(Collections.emptySet(), lookup.getTypes("foo")); Collection names = lookup.simpleMatchToFullName("foo"); assertNotNull(names); assertTrue(names.isEmpty()); @@ -51,7 +50,7 @@ public void testEmpty() { public void testDefaultMapping() { FieldTypeLookup lookup = new FieldTypeLookup(); try { - lookup.copyAndAddAll(MapperService.DEFAULT_MAPPING, Collections.emptyList(), randomBoolean()); + lookup.copyAndAddAll(MapperService.DEFAULT_MAPPING, Collections.emptyList()); fail(); } catch (IllegalArgumentException expected) { assertEquals("Default mappings should not be added to the lookup", expected.getMessage()); @@ -61,15 +60,11 @@ public void testDefaultMapping() { public void testAddNewField() { FieldTypeLookup lookup = new FieldTypeLookup(); MockFieldMapper f = new MockFieldMapper("foo"); - FieldTypeLookup lookup2 = lookup.copyAndAddAll("type", newList(f), randomBoolean()); + FieldTypeLookup lookup2 = lookup.copyAndAddAll("type", newList(f)); assertNull(lookup.get("foo")); assertNull(lookup.get("bar")); assertEquals(f.fieldType(), lookup2.get("foo")); assertNull(lookup.get("bar")); - assertEquals(Collections.emptySet(), lookup.getTypes("foo")); - assertEquals(Collections.emptySet(), lookup.getTypes("bar")); - assertEquals(Collections.singleton("type"), lookup2.getTypes("foo")); - assertEquals(Collections.emptySet(), lookup2.getTypes("bar")); assertEquals(1, size(lookup2.iterator())); } @@ -77,8 +72,8 @@ public void testAddExistingField() { MockFieldMapper f = new MockFieldMapper("foo"); MockFieldMapper f2 = new MockFieldMapper("foo"); FieldTypeLookup lookup = new FieldTypeLookup(); - lookup = lookup.copyAndAddAll("type1", newList(f), true); - FieldTypeLookup lookup2 = lookup.copyAndAddAll("type2", newList(f2), true); + lookup = lookup.copyAndAddAll("type1", newList(f)); + FieldTypeLookup lookup2 = lookup.copyAndAddAll("type2", newList(f2)); assertEquals(1, size(lookup2.iterator())); assertSame(f.fieldType(), lookup2.get("foo")); 
@@ -89,8 +84,8 @@ public void testAddExistingIndexName() { MockFieldMapper f = new MockFieldMapper("foo"); MockFieldMapper f2 = new MockFieldMapper("bar"); FieldTypeLookup lookup = new FieldTypeLookup(); - lookup = lookup.copyAndAddAll("type1", newList(f), randomBoolean()); - FieldTypeLookup lookup2 = lookup.copyAndAddAll("type2", newList(f2), randomBoolean()); + lookup = lookup.copyAndAddAll("type1", newList(f)); + FieldTypeLookup lookup2 = lookup.copyAndAddAll("type2", newList(f2)); assertSame(f.fieldType(), lookup2.get("foo")); assertSame(f2.fieldType(), lookup2.get("bar")); @@ -102,7 +97,7 @@ public void testAddExistingFullName() { MockFieldMapper f2 = new MockFieldMapper("foo"); FieldTypeLookup lookup = new FieldTypeLookup(); try { - lookup.copyAndAddAll("type2", newList(f2), randomBoolean()); + lookup.copyAndAddAll("type2", newList(f2)); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("mapper [foo] has different [index_name]")); } @@ -111,20 +106,13 @@ public void testCheckCompatibilityMismatchedTypes() { FieldMapper f1 = new MockFieldMapper("foo"); FieldTypeLookup lookup = new FieldTypeLookup(); - lookup = lookup.copyAndAddAll("type", newList(f1), randomBoolean()); + lookup = lookup.copyAndAddAll("type", newList(f1)); OtherFakeFieldType ft2 = new OtherFakeFieldType(); ft2.setName("foo"); FieldMapper f2 = new MockFieldMapper("foo", ft2); try { - lookup.copyAndAddAll("type2", newList(f2), false); - fail("expected type mismatch"); - } catch (IllegalArgumentException e) { - assertTrue(e.getMessage().contains("cannot be changed from type [faketype] to [otherfaketype]")); - } - // fails even if updateAllTypes == true - try { - lookup.copyAndAddAll("type2", newList(f2), true); + lookup.copyAndAddAll("type2", newList(f2)); fail("expected type mismatch"); } catch (IllegalArgumentException e) { assertTrue(e.getMessage().contains("cannot be changed from type [faketype] to [otherfaketype]")); @@ -134,35 +122,21 @@ public void testCheckCompatibilityConflict() { FieldMapper f1 = new MockFieldMapper("foo"); FieldTypeLookup lookup = new FieldTypeLookup(); - lookup = lookup.copyAndAddAll("type", newList(f1), randomBoolean()); + lookup = lookup.copyAndAddAll("type", newList(f1)); MappedFieldType ft2 = new MockFieldMapper.FakeFieldType(); ft2.setName("foo"); ft2.setBoost(2.0f); FieldMapper f2 = new MockFieldMapper("foo", ft2); - try { - // different type - lookup.copyAndAddAll("type2", newList(f2), false); - fail("expected conflict"); - } catch (IllegalArgumentException e) { - assertTrue(e.getMessage().contains("to update [boost] across all types")); - } - lookup.copyAndAddAll("type", newList(f2), false); // boost is updateable, so ok since we are implicitly updating all types - lookup.copyAndAddAll("type2", newList(f2), true); // boost is updateable, so ok if forcing + lookup.copyAndAddAll("type", newList(f2)); // boost is updateable, so the merge succeeds + lookup.copyAndAddAll("type2", newList(f2)); // boost is updateable, so adding the same field under another type also succeeds // now with a non changeable setting MappedFieldType ft3 = new MockFieldMapper.FakeFieldType(); ft3.setName("foo"); ft3.setStored(true); FieldMapper f3 = new MockFieldMapper("foo", ft3); try { - lookup.copyAndAddAll("type2", newList(f3), false); - fail("expected conflict"); - } catch (IllegalArgumentException e) { - assertTrue(e.getMessage().contains("has different [store] values")); - } - // even with 
updateAllTypes == true, incompatible - try { - lookup.copyAndAddAll("type2", newList(f3), true); + lookup.copyAndAddAll("type2", newList(f3)); fail("expected conflict"); } catch (IllegalArgumentException e) { assertTrue(e.getMessage().contains("has different [store] values")); @@ -173,7 +147,7 @@ public void testSimpleMatchFullNames() { MockFieldMapper f1 = new MockFieldMapper("foo"); MockFieldMapper f2 = new MockFieldMapper("bar"); FieldTypeLookup lookup = new FieldTypeLookup(); - lookup = lookup.copyAndAddAll("type", newList(f1, f2), randomBoolean()); + lookup = lookup.copyAndAddAll("type", newList(f1, f2)); Collection names = lookup.simpleMatchToFullName("b*"); assertFalse(names.contains("foo")); assertTrue(names.contains("bar")); @@ -182,7 +156,7 @@ public void testSimpleMatchFullNames() { public void testIteratorImmutable() { MockFieldMapper f1 = new MockFieldMapper("foo"); FieldTypeLookup lookup = new FieldTypeLookup(); - lookup = lookup.copyAndAddAll("type", newList(f1), randomBoolean()); + lookup = lookup.copyAndAddAll("type", newList(f1)); try { Iterator itr = lookup.iterator(); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java index e43cfbe1fd1c1..a9a830a4141e9 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java @@ -417,13 +417,13 @@ public void testGeoShapeMapperMerge() throws Exception { .field("precision", "1m").field("tree_levels", 8).field("distance_error_pct", 0.01).field("orientation", "ccw") .endObject().endObject().endObject().endObject().string(); MapperService mapperService = createIndex("test").mapperService(); - DocumentMapper docMapper = mapperService.merge("type", new CompressedXContent(stage1Mapping), MapperService.MergeReason.MAPPING_UPDATE, false); + DocumentMapper docMapper = mapperService.merge("type", new CompressedXContent(stage1Mapping), MapperService.MergeReason.MAPPING_UPDATE); String stage2Mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("shape").field("type", "geo_shape").field("tree", "quadtree") .field("strategy", "term").field("precision", "1km").field("tree_levels", 26).field("distance_error_pct", 26) .field("orientation", "cw").endObject().endObject().endObject().endObject().string(); try { - mapperService.merge("type", new CompressedXContent(stage2Mapping), MapperService.MergeReason.MAPPING_UPDATE, false); + mapperService.merge("type", new CompressedXContent(stage2Mapping), MapperService.MergeReason.MAPPING_UPDATE); fail(); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("mapper [shape] has different [strategy]")); @@ -449,7 +449,7 @@ public void testGeoShapeMapperMerge() throws Exception { stage2Mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("shape").field("type", "geo_shape").field("precision", "1m") .field("tree_levels", 8).field("distance_error_pct", 0.001).field("orientation", "cw").endObject().endObject().endObject().endObject().string(); - docMapper = mapperService.merge("type", new CompressedXContent(stage2Mapping), MapperService.MergeReason.MAPPING_UPDATE, false); + docMapper = mapperService.merge("type", new CompressedXContent(stage2Mapping), MapperService.MergeReason.MAPPING_UPDATE); fieldMapper = 
docMapper.mappers().getMapper("shape"); assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class)); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IdFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IdFieldMapperTests.java index ec07c4d92be3e..111389336f291 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IdFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IdFieldMapperTests.java @@ -62,7 +62,7 @@ public void testDefaultsMultipleTypes() throws IOException { .put("index.version.created", Version.V_5_6_0) .build(); MapperService mapperService = createIndex("test", indexSettings).mapperService(); - DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE, false); + DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE); ParsedDocument document = mapper.parse(SourceToParse.source("index", "type", "id", new BytesArray("{}"), XContentType.JSON)); assertEquals(Collections.emptyList(), Arrays.asList(document.rootDoc().getFields(IdFieldMapper.NAME))); } @@ -70,7 +70,7 @@ public void testDefaultsMultipleTypes() throws IOException { public void testDefaultsSingleType() throws IOException { Settings indexSettings = Settings.EMPTY; MapperService mapperService = createIndex("test", indexSettings).mapperService(); - DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE, false); + DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE); ParsedDocument document = mapper.parse(SourceToParse.source("index", "type", "id", new BytesArray("{}"), XContentType.JSON)); IndexableField[] fields = document.rootDoc().getFields(IdFieldMapper.NAME); assertEquals(1, fields.length); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/JavaMultiFieldMergeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/JavaMultiFieldMergeTests.java index c17df90b5a21d..49034a0b28785 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/JavaMultiFieldMergeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/JavaMultiFieldMergeTests.java @@ -40,7 +40,7 @@ public void testMergeMultiField() throws Exception { String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping1.json"); MapperService mapperService = createIndex("test").mapperService(); - DocumentMapper docMapper = mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, false); + DocumentMapper docMapper = mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions()); assertThat(docMapper.mappers().getMapper("name.indexed"), nullValue()); @@ -53,7 +53,7 @@ public void testMergeMultiField() throws Exception { assertThat(f, nullValue()); mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping2.json"); - docMapper = mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, false); + docMapper = mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); assertNotSame(IndexOptions.NONE, 
docMapper.mappers().getMapper("name").fieldType().indexOptions()); @@ -70,7 +70,7 @@ public void testMergeMultiField() throws Exception { assertThat(f, notNullValue()); mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping3.json"); - docMapper = mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, false); + docMapper = mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions()); @@ -81,7 +81,7 @@ public void testMergeMultiField() throws Exception { assertThat(docMapper.mappers().getMapper("name.not_indexed3"), nullValue()); mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping4.json"); - docMapper = mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, false); + docMapper = mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions()); @@ -96,7 +96,7 @@ public void testUpgradeFromMultiFieldTypeToMultiFields() throws Exception { String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping1.json"); MapperService mapperService = createIndex("test").mapperService(); - DocumentMapper docMapper = mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, false); + DocumentMapper docMapper = mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions()); assertThat(docMapper.mappers().getMapper("name.indexed"), nullValue()); @@ -110,7 +110,7 @@ public void testUpgradeFromMultiFieldTypeToMultiFields() throws Exception { mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/upgrade1.json"); - docMapper = mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, false); + docMapper = mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions()); @@ -127,7 +127,7 @@ public void testUpgradeFromMultiFieldTypeToMultiFields() throws Exception { assertThat(f, notNullValue()); mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/upgrade2.json"); - docMapper = mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, false); + docMapper = mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions()); @@ -140,7 +140,7 @@ public void testUpgradeFromMultiFieldTypeToMultiFields() throws Exception { mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/upgrade3.json"); try { - mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, false); + mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); fail(); } catch (IllegalArgumentException e) { 
assertThat(e.getMessage(), containsString("mapper [name] has different [index] values")); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java index e67b25b051b4e..aa663ed5699a0 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java @@ -358,7 +358,7 @@ public void testUpdateNormalizer() throws IOException { .startObject("properties").startObject("field") .field("type", "keyword").field("normalizer", "my_lowercase").endObject().endObject() .endObject().endObject().string(); - indexService.mapperService().merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, randomBoolean()); + indexService.mapperService().merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); String mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("field") @@ -366,9 +366,9 @@ public void testUpdateNormalizer() throws IOException { .endObject().endObject().string(); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> indexService.mapperService().merge("type", - new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, randomBoolean())); + new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE)); assertEquals( - "Mapper for [field] conflicts with existing mapping in other types:\n[mapper [field] has different [normalizer]]", + "Mapper for [field] conflicts with existing mapping:\n[mapper [field] has different [normalizer]]", e.getMessage()); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java index 95183cc854a38..6d7665d889563 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java @@ -89,15 +89,15 @@ public void testTypes() throws Exception { MapperService mapperService = indexService1.mapperService(); assertEquals(Collections.emptySet(), mapperService.types()); - mapperService.merge("type1", new CompressedXContent("{\"type1\":{}}"), MapperService.MergeReason.MAPPING_UPDATE, false); + mapperService.merge("type1", new CompressedXContent("{\"type1\":{}}"), MapperService.MergeReason.MAPPING_UPDATE); assertNull(mapperService.documentMapper(MapperService.DEFAULT_MAPPING)); assertEquals(Collections.singleton("type1"), mapperService.types()); - mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedXContent("{\"_default_\":{}}"), MapperService.MergeReason.MAPPING_UPDATE, false); + mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedXContent("{\"_default_\":{}}"), MapperService.MergeReason.MAPPING_UPDATE); assertNotNull(mapperService.documentMapper(MapperService.DEFAULT_MAPPING)); assertEquals(Collections.singleton("type1"), mapperService.types()); - mapperService.merge("type2", new CompressedXContent("{\"type2\":{}}"), MapperService.MergeReason.MAPPING_UPDATE, false); + mapperService.merge("type2", new CompressedXContent("{\"type2\":{}}"), MapperService.MergeReason.MAPPING_UPDATE); assertNotNull(mapperService.documentMapper(MapperService.DEFAULT_MAPPING)); assertEquals(new HashSet<>(Arrays.asList("type1", "type2")), mapperService.types()); } @@ -148,11 +148,11 @@ public void 
testTotalFieldsExceedsLimit() throws Throwable { throw new UncheckedIOException(e); } }; - createIndex("test1").mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE, false); + createIndex("test1").mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE); //set total number of fields to 1 to trigger an exception IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { createIndex("test2", Settings.builder().put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), 1).build()) - .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE, false); + .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE); }); assertTrue(e.getMessage(), e.getMessage().contains("Limit of total fields [1] in index [test2] has been exceeded")); } @@ -166,7 +166,7 @@ public void testMappingDepthExceedsLimit() throws Throwable { .endObject().endObject().bytes()); IndexService indexService1 = createIndex("test1", Settings.builder().put(MapperService.INDEX_MAPPING_DEPTH_LIMIT_SETTING.getKey(), 1).build()); // no exception - indexService1.mapperService().merge("type", simpleMapping, MergeReason.MAPPING_UPDATE, false); + indexService1.mapperService().merge("type", simpleMapping, MergeReason.MAPPING_UPDATE); CompressedXContent objectMapping = new CompressedXContent(XContentFactory.jsonBuilder().startObject() .startObject("properties") @@ -177,10 +177,10 @@ public void testMappingDepthExceedsLimit() throws Throwable { IndexService indexService2 = createIndex("test2"); // no exception - indexService2.mapperService().merge("type", objectMapping, MergeReason.MAPPING_UPDATE, false); + indexService2.mapperService().merge("type", objectMapping, MergeReason.MAPPING_UPDATE); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> indexService1.mapperService().merge("type2", objectMapping, MergeReason.MAPPING_UPDATE, false)); + () -> indexService1.mapperService().merge("type2", objectMapping, MergeReason.MAPPING_UPDATE)); assertThat(e.getMessage(), containsString("Limit of mapping depth [1] in index [test1] has been exceeded")); } @@ -200,14 +200,14 @@ public void testMergeWithMap() throws Throwable { mappings.put(MapperService.DEFAULT_MAPPING, MapperService.parseMapping(xContentRegistry(), "{}")); MapperException e = expectThrows(MapperParsingException.class, - () -> mapperService.merge(mappings, MergeReason.MAPPING_UPDATE, false)); + () -> mapperService.merge(mappings, MergeReason.MAPPING_UPDATE)); assertThat(e.getMessage(), startsWith("Failed to parse mapping [" + MapperService.DEFAULT_MAPPING + "]: ")); mappings.clear(); mappings.put("type1", MapperService.parseMapping(xContentRegistry(), "{}")); e = expectThrows( MapperParsingException.class, - () -> mapperService.merge(mappings, MergeReason.MAPPING_UPDATE, false)); + () -> mapperService.merge(mappings, MergeReason.MAPPING_UPDATE)); assertThat(e.getMessage(), startsWith("Failed to parse mapping [type1]: ")); } @@ -223,7 +223,7 @@ public void testMergeParentTypesSame() { Set parentTypes = mapperService.getParentTypes(); Map> mappings = new HashMap<>(); - mapperService.merge(mappings, MergeReason.MAPPING_UPDATE, false); + mapperService.merge(mappings, MergeReason.MAPPING_UPDATE); assertSame(parentTypes, mapperService.getParentTypes()); } @@ -238,10 +238,10 @@ public void 
testOtherDocumentMappersOnlyUpdatedWhenChangingFieldType() throws IO .endObject() .endObject().endObject().bytes()); - indexService.mapperService().merge("type1", simpleMapping, MergeReason.MAPPING_UPDATE, true); + indexService.mapperService().merge("type1", simpleMapping, MergeReason.MAPPING_UPDATE); DocumentMapper documentMapper = indexService.mapperService().documentMapper("type1"); - indexService.mapperService().merge("type2", simpleMapping, MergeReason.MAPPING_UPDATE, true); + indexService.mapperService().merge("type2", simpleMapping, MergeReason.MAPPING_UPDATE); assertSame(indexService.mapperService().documentMapper("type1"), documentMapper); CompressedXContent normsDisabledMapping = new CompressedXContent(XContentFactory.jsonBuilder().startObject() @@ -252,7 +252,7 @@ public void testOtherDocumentMappersOnlyUpdatedWhenChangingFieldType() throws IO .endObject() .endObject().endObject().bytes()); - indexService.mapperService().merge("type3", normsDisabledMapping, MergeReason.MAPPING_UPDATE, true); + indexService.mapperService().merge("type3", normsDisabledMapping, MergeReason.MAPPING_UPDATE); assertNotSame(indexService.mapperService().documentMapper("type1"), documentMapper); } @@ -307,7 +307,7 @@ public void testIndexSortWithNestedFields() throws IOException { .endObject().endObject().bytes()); invalidNestedException = expectThrows(IllegalArgumentException.class, () -> indexService.mapperService().merge("t", nestedFieldMapping, - MergeReason.MAPPING_UPDATE, true)); + MergeReason.MAPPING_UPDATE)); assertThat(invalidNestedException.getMessage(), containsString("cannot have nested fields when index sort is activated")); } @@ -315,18 +315,18 @@ public void testIndexSortWithNestedFields() throws IOException { public void testForbidMultipleTypes() throws IOException { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string(); MapperService mapperService = createIndex("test").mapperService(); - mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, randomBoolean()); + mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); String mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type2").endObject().endObject().string(); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> mapperService.merge("type2", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, randomBoolean())); + () -> mapperService.merge("type2", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE)); assertThat(e.getMessage(), Matchers.startsWith("Rejecting mapping update to [test] as the final mapping would have more than 1 type: ")); } public void testDefaultMappingIsDeprecated() throws IOException { String mapping = XContentFactory.jsonBuilder().startObject().startObject("_default_").endObject().endObject().string(); MapperService mapperService = createIndex("test").mapperService(); - mapperService.merge("_default_", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, randomBoolean()); + mapperService.merge("_default_", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); assertWarnings("[_default_] mapping is deprecated since it is not useful anymore now that indexes " + "cannot have more than one type"); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java index 8da4b302a6f8f..bbcad5b7203a2 100644 
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java
index 8da4b302a6f8f..bbcad5b7203a2 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java
@@ -444,23 +444,23 @@ public void testLimitOfNestedFieldsPerIndex() throws Exception {
         };
         // default limit allows at least two nested fields
-        createIndex("test1").mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE, false);
+        createIndex("test1").mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE);
         // explicitly setting limit to 0 prevents nested fields
         Exception e = expectThrows(IllegalArgumentException.class, () ->
             createIndex("test2", Settings.builder().put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 0).build())
-                .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE, false));
+                .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE));
         assertThat(e.getMessage(), containsString("Limit of nested fields [0] in index [test2] has been exceeded"));
         // setting limit to 1 with 2 nested fields fails
         e = expectThrows(IllegalArgumentException.class, () ->
             createIndex("test3", Settings.builder().put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 1).build())
-                .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE, false));
+                .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE));
         assertThat(e.getMessage(), containsString("Limit of nested fields [1] in index [test3] has been exceeded"));
         // do not check nested fields limit if mapping is not updated
         createIndex("test4", Settings.builder().put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 0).build())
-            .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_RECOVERY, false);
+            .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_RECOVERY);
     }
@@ -479,19 +479,19 @@ public void testLimitOfNestedFieldsWithMultiTypePerIndex() throws Exception {
         MapperService mapperService = createIndex("test4", Settings.builder()
             .put("index.version.created", Version.V_5_6_0)
             .put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 2).build()).mapperService();
-        mapperService.merge("type1", new CompressedXContent(mapping.apply("type1")), MergeReason.MAPPING_UPDATE, false);
+        mapperService.merge("type1", new CompressedXContent(mapping.apply("type1")), MergeReason.MAPPING_UPDATE);
         // merging same fields, but different type is ok
-        mapperService.merge("type2", new CompressedXContent(mapping.apply("type2")), MergeReason.MAPPING_UPDATE, false);
+        mapperService.merge("type2", new CompressedXContent(mapping.apply("type2")), MergeReason.MAPPING_UPDATE);
         // adding new fields from different type is not ok
         String mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type3").startObject("properties").startObject("nested3")
             .field("type", "nested").startObject("properties").endObject().endObject().endObject().endObject().endObject().string();
         IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () ->
-            mapperService.merge("type3", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, false));
+            mapperService.merge("type3", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE));
         assertThat(e.getMessage(), containsString("Limit of nested fields [2] in index [test4] has been exceeded"));
         // do not check nested fields limit if mapping is not updated
         createIndex("test5", Settings.builder().put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 0).build())
-            .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_RECOVERY, false);
+            .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_RECOVERY);
     }
     public void testParentObjectMapperAreNested() throws Exception {
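The limit exercised here is the index-scoped setting behind INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING (key "index.mapping.nested_fields.limit"). A sketch of building such settings for a test index; the helper and the value 1 are illustrative, not from the patch:

    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.index.mapper.MapperService;

    class NestedLimitSketch {
        // Index settings that allow at most one nested field.
        static Settings oneNestedField() {
            return Settings.builder()
                .put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 1)
                .build();
        }
    }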
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java
index bba2007285bcc..ea8f63345a183 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java
@@ -181,13 +181,13 @@ public void testMerge() throws IOException {
                 .endObject()
             .endObject().endObject().string();
         MapperService mapperService = createIndex("test").mapperService();
-        DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false);
+        DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE);
         assertNull(mapper.root().dynamic());
         String update = XContentFactory.jsonBuilder().startObject()
             .startObject("type")
                 .field("dynamic", "strict")
             .endObject().endObject().string();
-        mapper = mapperService.merge("type", new CompressedXContent(update), MergeReason.MAPPING_UPDATE, false);
+        mapper = mapperService.merge("type", new CompressedXContent(update), MergeReason.MAPPING_UPDATE);
         assertEquals(Dynamic.STRICT, mapper.root().dynamic());
     }
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java
index d21827ee18cea..dee554449bcc4 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java
@@ -78,8 +78,8 @@ public void testJoinFieldSet() throws Exception {
             .startObject("_parent").field("type", "parent_type").endObject()
             .endObject().endObject().string();
         IndexService indexService = createIndex("test", Settings.builder().put("index.version.created", Version.V_5_6_0).build());
-        indexService.mapperService().merge("parent_type", new CompressedXContent(parentMapping), MergeReason.MAPPING_UPDATE, false);
-        indexService.mapperService().merge("child_type", new CompressedXContent(childMapping), MergeReason.MAPPING_UPDATE, false);
+        indexService.mapperService().merge("parent_type", new CompressedXContent(parentMapping), MergeReason.MAPPING_UPDATE);
+        indexService.mapperService().merge("child_type", new CompressedXContent(childMapping), MergeReason.MAPPING_UPDATE);
         // Indexing parent doc:
         DocumentMapper parentDocMapper = indexService.mapperService().documentMapper("parent_type");
@@ -121,7 +121,7 @@ public void testNoParentNullFieldCreatedIfNoParentSpecified() throws Exception {
                 .startObject("properties")
                 .endObject()
             .endObject().endObject();
-        mapperService.merge("some_type", new CompressedXContent(mappingSource.string()), MergeReason.MAPPING_UPDATE, false);
+        mapperService.merge("some_type", new CompressedXContent(mappingSource.string()), MergeReason.MAPPING_UPDATE);
         Set<String> allFields = new HashSet<>(mapperService.simpleMatchToIndexNames("*"));
         assertTrue(allFields.contains("_parent"));
         assertFalse(allFields.contains("_parent#null"));
@@ -146,15 +146,15 @@ public void testUpdateEagerGlobalOrds() throws IOException {
             .startObject("_parent").field("type", "parent_type").endObject()
             .endObject().endObject().string();
         IndexService indexService = createIndex("test", Settings.builder().put("index.version.created", Version.V_5_6_0).build());
-        indexService.mapperService().merge("parent_type", new CompressedXContent(parentMapping), MergeReason.MAPPING_UPDATE, false);
-        indexService.mapperService().merge("child_type", new CompressedXContent(childMapping), MergeReason.MAPPING_UPDATE, false);
+        indexService.mapperService().merge("parent_type", new CompressedXContent(parentMapping), MergeReason.MAPPING_UPDATE);
+        indexService.mapperService().merge("child_type", new CompressedXContent(childMapping), MergeReason.MAPPING_UPDATE);
         assertTrue(indexService.mapperService().documentMapper("child_type").parentFieldMapper().fieldType().eagerGlobalOrdinals());
         String childMappingUpdate = XContentFactory.jsonBuilder().startObject().startObject("child_type")
             .startObject("_parent").field("type", "parent_type").field("eager_global_ordinals", false).endObject()
             .endObject().endObject().string();
-        indexService.mapperService().merge("child_type", new CompressedXContent(childMappingUpdate), MergeReason.MAPPING_UPDATE, false);
+        indexService.mapperService().merge("child_type", new CompressedXContent(childMappingUpdate), MergeReason.MAPPING_UPDATE);
         assertFalse(indexService.mapperService().documentMapper("child_type").parentFieldMapper().fieldType().eagerGlobalOrdinals());
     }
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java
index 72195fbd954fc..c93d968b448fe 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java
@@ -61,7 +61,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws
             DOUBLE_RANGE_FIELD_NAME, "type=double_range",
             DATE_RANGE_FIELD_NAME, "type=date_range",
             IP_RANGE_FIELD_NAME, "type=ip_range"
-        ).string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+        ).string()), MapperService.MergeReason.MAPPING_UPDATE);
     }
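PutMappingRequest.buildFromSimplifiedDef, used in these initializeAdditionalMappings hunks, expands alternating field-name/definition pairs into a full mapping body. A sketch with hypothetical field names (the test's own constants are defined elsewhere):

    import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;

    class SimplifiedDefSketch {
        // "my_int_range" and "my_date_range" are made-up names for illustration.
        static String rangeMappings() throws Exception {
            return PutMappingRequest.buildFromSimplifiedDef("_doc",
                "my_int_range", "type=integer_range",
                "my_date_range", "type=date_range").string();
        }
    }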
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/RootObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/RootObjectMapperTests.java
index a76d5d01316fb..d55b90573e9cd 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/RootObjectMapperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/RootObjectMapperTests.java
@@ -36,7 +36,7 @@ public void testNumericDetection() throws Exception {
             .endObject()
             .endObject().string();
         MapperService mapperService = createIndex("test").mapperService();
-        DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false);
+        DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE);
         assertEquals(mapping, mapper.mappingSource().toString());
         // update with a different explicit value
@@ -46,7 +46,7 @@ public void testNumericDetection() throws Exception {
             .field("numeric_detection", true)
             .endObject()
             .endObject().string();
-        mapper = mapperService.merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, false);
+        mapper = mapperService.merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE);
         assertEquals(mapping2, mapper.mappingSource().toString());
         // update with an implicit value: no change
@@ -55,7 +55,7 @@ public void testNumericDetection() throws Exception {
             .startObject("type")
             .endObject()
             .endObject().string();
-        mapper = mapperService.merge("type", new CompressedXContent(mapping3), MergeReason.MAPPING_UPDATE, false);
+        mapper = mapperService.merge("type", new CompressedXContent(mapping3), MergeReason.MAPPING_UPDATE);
         assertEquals(mapping2, mapper.mappingSource().toString());
     }
@@ -67,7 +67,7 @@ public void testDateDetection() throws Exception {
             .endObject()
             .endObject().string();
         MapperService mapperService = createIndex("test").mapperService();
-        DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false);
+        DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE);
         assertEquals(mapping, mapper.mappingSource().toString());
         // update with a different explicit value
@@ -77,7 +77,7 @@ public void testDateDetection() throws Exception {
             .field("date_detection", false)
             .endObject()
             .endObject().string();
-        mapper = mapperService.merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, false);
+        mapper = mapperService.merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE);
         assertEquals(mapping2, mapper.mappingSource().toString());
         // update with an implicit value: no change
@@ -86,7 +86,7 @@ public void testDateDetection() throws Exception {
             .startObject("type")
             .endObject()
             .endObject().string();
-        mapper = mapperService.merge("type", new CompressedXContent(mapping3), MergeReason.MAPPING_UPDATE, false);
+        mapper = mapperService.merge("type", new CompressedXContent(mapping3), MergeReason.MAPPING_UPDATE);
         assertEquals(mapping2, mapper.mappingSource().toString());
     }
@@ -98,7 +98,7 @@ public void testDateFormatters() throws Exception {
             .endObject()
             .endObject().string();
         MapperService mapperService = createIndex("test").mapperService();
-        DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false);
+        DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE);
         assertEquals(mapping, mapper.mappingSource().toString());
         // no update if formatters are not set explicitly
@@ -107,7 +107,7 @@ public void testDateFormatters() throws Exception {
             .startObject("type")
             .endObject()
             .endObject().string();
-        mapper = mapperService.merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, false);
+        mapper = mapperService.merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE);
         assertEquals(mapping, mapper.mappingSource().toString());
         String mapping3 = XContentFactory.jsonBuilder()
@@ -116,7 +116,7 @@ public void testDateFormatters() throws Exception {
             .field("dynamic_date_formats", Arrays.asList())
             .endObject()
             .endObject().string();
-        mapper = mapperService.merge("type", new CompressedXContent(mapping3), MergeReason.MAPPING_UPDATE, false);
+        mapper = mapperService.merge("type", new CompressedXContent(mapping3), MergeReason.MAPPING_UPDATE);
         assertEquals(mapping3, mapper.mappingSource().toString());
     }
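All of the builders in these hunks reduce to small JSON mappings; the date-detection update above, for example, is equivalent to {"type":{"date_detection":false}}. A sketch of that builder form, purely illustrative:

    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;

    class MappingBuilderSketch {
        // Produces {"type":{"date_detection":false}}.
        static XContentBuilder dateDetectionOff() throws Exception {
            return XContentFactory.jsonBuilder().startObject()
                .startObject("type")
                    .field("date_detection", false)
                .endObject()
            .endObject();
        }
    }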
@@ -137,7 +137,7 @@ public void testDynamicTemplates() throws Exception {
             .endObject()
             .endObject().string();
         MapperService mapperService = createIndex("test").mapperService();
-        DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false);
+        DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE);
         assertEquals(mapping, mapper.mappingSource().toString());
         // no update if templates are not set explicitly
@@ -146,7 +146,7 @@ public void testDynamicTemplates() throws Exception {
             .startObject("type")
             .endObject()
             .endObject().string();
-        mapper = mapperService.merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, false);
+        mapper = mapperService.merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE);
         assertEquals(mapping, mapper.mappingSource().toString());
         String mapping3 = XContentFactory.jsonBuilder()
@@ -155,7 +155,7 @@ public void testDynamicTemplates() throws Exception {
             .field("dynamic_templates", Arrays.asList())
             .endObject()
             .endObject().string();
-        mapper = mapperService.merge("type", new CompressedXContent(mapping3), MergeReason.MAPPING_UPDATE, false);
+        mapper = mapperService.merge("type", new CompressedXContent(mapping3), MergeReason.MAPPING_UPDATE);
         assertEquals(mapping3, mapper.mappingSource().toString());
     }
 }
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java
index 85017cb35cd39..f40229e9ebe78 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java
@@ -164,7 +164,7 @@ public void testDefaultMappingAndNoMappingWithMapperService() throws Exception {
         Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_6_0).build();
         MapperService mapperService = createIndex("test", settings).mapperService();
-        mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedXContent(defaultMapping), MapperService.MergeReason.MAPPING_UPDATE, false);
+        mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedXContent(defaultMapping), MapperService.MergeReason.MAPPING_UPDATE);
         DocumentMapper mapper = mapperService.documentMapperWithAutoCreate("my_type").getDocumentMapper();
         assertThat(mapper.type(), equalTo("my_type"));
@@ -178,12 +178,12 @@ public void testDefaultMappingAndWithMappingOverrideWithMapperService() throws E
         Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_6_0).build();
         MapperService mapperService = createIndex("test", settings).mapperService();
-        mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedXContent(defaultMapping), MapperService.MergeReason.MAPPING_UPDATE, false);
+        mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedXContent(defaultMapping), MapperService.MergeReason.MAPPING_UPDATE);
         String mapping = XContentFactory.jsonBuilder().startObject().startObject("my_type")
             .startObject("_source").field("enabled", true).endObject()
             .endObject().endObject().string();
-        mapperService.merge("my_type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, false);
+        mapperService.merge("my_type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE);
         DocumentMapper mapper = mapperService.documentMapper("my_type");
         assertThat(mapper.type(), equalTo("my_type"));
@@ -194,10 +194,10 @@ void assertConflicts(String mapping1, String mapping2, DocumentMapperParser pars
         DocumentMapper docMapper = parser.parse("type", new CompressedXContent(mapping1));
         docMapper = parser.parse("type", docMapper.mappingSource());
         if (conflicts.length == 0) {
-            docMapper.merge(parser.parse("type", new CompressedXContent(mapping2)).mapping(), false);
+            docMapper.merge(parser.parse("type", new CompressedXContent(mapping2)).mapping());
         } else {
             try {
-                docMapper.merge(parser.parse("type", new CompressedXContent(mapping2)).mapping(), false);
+                docMapper.merge(parser.parse("type", new CompressedXContent(mapping2)).mapping());
                 fail();
             } catch (IllegalArgumentException e) {
                 for (String conflict : conflicts) {
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/StoredNumericValuesTests.java b/server/src/test/java/org/elasticsearch/index/mapper/StoredNumericValuesTests.java
index 438ccd5fa8688..2ff2bda01df25 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/StoredNumericValuesTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/StoredNumericValuesTests.java
@@ -62,7 +62,7 @@ public void testBytesAndNumericRepresentation() throws Exception {
                 .endObject()
                 .string();
         MapperService mapperService = createIndex("test").mapperService();
-        DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false);
+        DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE);
         ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder()
                 .startObject()
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java
index 3f0f4a87792d3..f5b83d1bd6eb1 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java
@@ -205,7 +205,7 @@ public void testDefaultPositionIncrementGap() throws IOException {
             .endObject().endObject().string();
         DocumentMapper mapper = indexService.mapperService().merge("type",
-                new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false);
+                new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE);
         assertEquals(mapping, mapper.mappingSource().toString());
@@ -247,7 +247,7 @@ public void testPositionIncrementGap() throws IOException {
             .endObject().endObject().string();
         DocumentMapper mapper = indexService.mapperService().merge("type",
-                new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false);
+                new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE);
         assertEquals(mapping, mapper.mappingSource().toString());
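DocumentMapper#merge loses its boolean as well; conflicts now surface purely from the two mappings being merged. A sketch of the call shape, assuming a DocumentMapperParser and two mapping strings as stand-ins:

    import org.elasticsearch.common.compress.CompressedXContent;
    import org.elasticsearch.index.mapper.DocumentMapper;
    import org.elasticsearch.index.mapper.DocumentMapperParser;

    class DocMapperMergeSketch {
        static DocumentMapper mergeOnce(DocumentMapperParser parser, String mapping1, String mapping2) throws Exception {
            DocumentMapper docMapper = parser.parse("type", new CompressedXContent(mapping1));
            // Conflicts (e.g. changing a field's type) surface as IllegalArgumentException here.
            return docMapper.merge(parser.parse("type", new CompressedXContent(mapping2)).mapping());
        }
    }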
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldMapperTests.java
index d941b5a0469b9..4687a3a24ef74 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldMapperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldMapperTests.java
@@ -66,7 +66,7 @@ public void testDocValues(boolean singleType) throws IOException {
             .put("index.version.created", Version.V_5_6_0)
             .build();
         MapperService mapperService = createIndex("test", indexSettings).mapperService();
-        DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE, false);
+        DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE);
         ParsedDocument document = mapper.parse(SourceToParse.source("index", "type", "id", new BytesArray("{}"), XContentType.JSON));
         Directory dir = newDirectory();
@@ -93,7 +93,7 @@ public void testDefaultsMultipleTypes() throws IOException {
             .put("index.version.created", Version.V_5_6_0)
             .build();
         MapperService mapperService = createIndex("test", indexSettings).mapperService();
-        DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE, false);
+        DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE);
         ParsedDocument document = mapper.parse(SourceToParse.source("index", "type", "id", new BytesArray("{}"), XContentType.JSON));
         IndexableField[] fields = document.rootDoc().getFields(TypeFieldMapper.NAME);
         assertEquals(IndexOptions.DOCS, fields[0].fieldType().indexOptions());
@@ -103,7 +103,7 @@ public void testDefaultsSingleType() throws IOException {
         Settings indexSettings = Settings.EMPTY;
         MapperService mapperService = createIndex("test", indexSettings).mapperService();
-        DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE, false);
+        DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE);
         ParsedDocument document = mapper.parse(SourceToParse.source("index", "type", "id", new BytesArray("{}"), XContentType.JSON));
         assertEquals(Collections.emptyList(), Arrays.asList(document.rootDoc().getFields(TypeFieldMapper.NAME)));
     }
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/UidFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/UidFieldMapperTests.java
index c5816de2e1920..4128cec082e0a 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/UidFieldMapperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/UidFieldMapperTests.java
@@ -48,7 +48,7 @@ public void testDefaultsMultipleTypes() throws IOException {
             .put("index.version.created", Version.V_5_6_0)
             .build();
         MapperService mapperService = createIndex("test", indexSettings).mapperService();
-        DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE, false);
+        DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE);
         ParsedDocument document = mapper.parse(SourceToParse.source("index", "type", "id", new BytesArray("{}"), XContentType.JSON));
         IndexableField[] fields = document.rootDoc().getFields(UidFieldMapper.NAME);
         assertEquals(1, fields.length);
@@ -60,7 +60,7 @@ public void testDefaultsMultipleTypes() throws IOException {
     public void testDefaultsSingleType() throws IOException {
         Settings indexSettings = Settings.EMPTY;
         MapperService mapperService = createIndex("test", indexSettings).mapperService();
-        DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE, false);
+        DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE);
         ParsedDocument document = mapper.parse(SourceToParse.source("index", "type", "id", new BytesArray("{}"), XContentType.JSON));
         assertEquals(Collections.emptyList(), Arrays.asList(document.rootDoc().getFields(UidFieldMapper.NAME)));
     }
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java b/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java
index c6a1eae036ada..c21fffc1bb167 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java
@@ -76,7 +76,7 @@ protected void testConflictWhileMergingAndMappingUnchanged(XContentBuilder mappi
         CompressedXContent mappingBeforeUpdate = indexService.mapperService().documentMapper("type").mappingSource();
         // simulate like in MetaDataMappingService#putMapping
         try {
-            indexService.mapperService().merge("type", new CompressedXContent(mappingUpdate.bytes()), MapperService.MergeReason.MAPPING_UPDATE, false);
+            indexService.mapperService().merge("type", new CompressedXContent(mappingUpdate.bytes()), MapperService.MergeReason.MAPPING_UPDATE);
             fail();
         } catch (IllegalArgumentException e) {
             // expected
@@ -97,14 +97,14 @@ public void testConflictSameType() throws Exception {
             .endObject().endObject().endObject();
         try {
-            mapperService.merge("type", new CompressedXContent(update.string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+            mapperService.merge("type", new CompressedXContent(update.string()), MapperService.MergeReason.MAPPING_UPDATE);
             fail();
         } catch (IllegalArgumentException e) {
             assertThat(e.getMessage(), containsString("mapper [foo] cannot be changed from type [long] to [double]"));
         }
         try {
-            mapperService.merge("type", new CompressedXContent(update.string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+            mapperService.merge("type", new CompressedXContent(update.string()), MapperService.MergeReason.MAPPING_UPDATE);
             fail();
         } catch (IllegalArgumentException e) {
             assertThat(e.getMessage(), containsString("mapper [foo] cannot be changed from type [long] to [double]"));
@@ -125,7 +125,7 @@ public void testConflictNewType() throws Exception {
             .endObject().endObject().endObject();
         try {
-            mapperService.merge("type2", new CompressedXContent(update.string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+            mapperService.merge("type2", new CompressedXContent(update.string()), MapperService.MergeReason.MAPPING_UPDATE);
             fail();
         } catch (IllegalArgumentException e) {
             // expected
@@ -133,7 +133,7 @@ public void testConflictNewType() throws Exception {
         }
         try {
-            mapperService.merge("type2", new CompressedXContent(update.string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+            mapperService.merge("type2", new CompressedXContent(update.string()), MapperService.MergeReason.MAPPING_UPDATE);
             fail();
         } catch (IllegalArgumentException e) {
             // expected
@@ -154,15 +154,15 @@ public void testConflictNewTypeUpdate() throws Exception {
         MapperService mapperService = createIndex("test", Settings.builder().put("index.version.created", Version.V_5_6_0).build()).mapperService();
-        mapperService.merge("type1", new CompressedXContent(mapping1.string()), MapperService.MergeReason.MAPPING_UPDATE, false);
-        mapperService.merge("type2", new CompressedXContent(mapping2.string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+        mapperService.merge("type1", new CompressedXContent(mapping1.string()), MapperService.MergeReason.MAPPING_UPDATE);
+        mapperService.merge("type2", new CompressedXContent(mapping2.string()), MapperService.MergeReason.MAPPING_UPDATE);
         XContentBuilder update = XContentFactory.jsonBuilder().startObject().startObject("type2")
                 .startObject("properties").startObject("foo").field("type", "double").endObject()
             .endObject().endObject().endObject();
         try {
-            mapperService.merge("type2", new CompressedXContent(update.string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+            mapperService.merge("type2", new CompressedXContent(update.string()), MapperService.MergeReason.MAPPING_UPDATE);
             fail();
         } catch (IllegalArgumentException e) {
             // expected
@@ -170,7 +170,7 @@ public void testConflictNewTypeUpdate() throws Exception {
         }
         try {
-            mapperService.merge("type2", new CompressedXContent(update.string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+            mapperService.merge("type2", new CompressedXContent(update.string()), MapperService.MergeReason.MAPPING_UPDATE);
             fail();
         } catch (IllegalArgumentException e) {
             // expected
@@ -190,14 +190,14 @@ public void testReuseMetaField() throws IOException {
         MapperService mapperService = createIndex("test", Settings.builder().build()).mapperService();
         try {
-            mapperService.merge("type", new CompressedXContent(mapping.string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+            mapperService.merge("type", new CompressedXContent(mapping.string()), MapperService.MergeReason.MAPPING_UPDATE);
             fail();
         } catch (IllegalArgumentException e) {
             assertTrue(e.getMessage().contains("Field [_id] is defined twice in [type]"));
         }
         try {
-            mapperService.merge("type", new CompressedXContent(mapping.string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+            mapperService.merge("type", new CompressedXContent(mapping.string()), MapperService.MergeReason.MAPPING_UPDATE);
             fail();
         } catch (IllegalArgumentException e) {
             assertTrue(e.getMessage().contains("Field [_id] is defined twice in [type]"));
@@ -223,16 +223,16 @@ public void testRejectFieldDefinedTwice() throws IOException {
             .endObject().endObject().string();
         MapperService mapperService1 = createIndex("test1").mapperService();
-        mapperService1.merge("type1", new CompressedXContent(mapping1), MergeReason.MAPPING_UPDATE, false);
+        mapperService1.merge("type1", new CompressedXContent(mapping1), MergeReason.MAPPING_UPDATE);
         IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
-                () -> mapperService1.merge("type2", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, false));
+                () -> mapperService1.merge("type2", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE));
         assertThat(e.getMessage(), equalTo("[foo] is defined as a field in mapping [type2" +
                 "] but this name is already used for an object in other types"));
         MapperService mapperService2 = createIndex("test2").mapperService();
-        mapperService2.merge("type2", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, false);
+        mapperService2.merge("type2", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE);
         e = expectThrows(IllegalArgumentException.class,
-                () -> mapperService2.merge("type1", new CompressedXContent(mapping1), MergeReason.MAPPING_UPDATE, false));
+                () -> mapperService2.merge("type1", new CompressedXContent(mapping1), MergeReason.MAPPING_UPDATE));
         assertThat(e.getMessage(), equalTo("[foo] is defined as an object in mapping [type1" +
                 "] but this name is already used for a field in other types"));
     }
diff --git a/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java
index 8849c91ddb368..00a9753b6f874 100644
--- a/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java
@@ -355,7 +355,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws
                 "string_boost", "type=text,boost=4", "string_no_pos",
                 "type=text,index_options=docs").string()
             ),
-            MapperService.MergeReason.MAPPING_UPDATE, false);
+            MapperService.MergeReason.MAPPING_UPDATE);
     }
     public void testMatchPhrasePrefixWithBoost() throws Exception {
diff --git a/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java
index 31a749161074a..c199bf02dd264 100644
--- a/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java
@@ -67,7 +67,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws
             OBJECT_FIELD_NAME, "type=object",
             GEO_POINT_FIELD_NAME, "type=geo_point",
             "nested1", "type=nested"
-        ).string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+        ).string()), MapperService.MergeReason.MAPPING_UPDATE);
     }
     /**
diff --git a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java
index 1b8cef1ab5355..3093031fbca96 100644
--- a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java
@@ -835,7 +835,7 @@ public void testDisabledFieldNamesField() throws Exception {
             PutMappingRequest.buildFromSimplifiedDef("_doc",
                 "foo", "type=text",
                 "_field_names", "enabled=false").string()),
-            MapperService.MergeReason.MAPPING_UPDATE, true);
+            MapperService.MergeReason.MAPPING_UPDATE);
         try {
             QueryStringQueryBuilder queryBuilder = new QueryStringQueryBuilder("foo:*");
             Query query = queryBuilder.toQuery(context);
@@ -848,7 +848,7 @@ public void testDisabledFieldNamesField() throws Exception {
             PutMappingRequest.buildFromSimplifiedDef("_doc",
                 "foo", "type=text",
                 "_field_names", "enabled=true").string()),
-            MapperService.MergeReason.MAPPING_UPDATE, true);
+            MapperService.MergeReason.MAPPING_UPDATE);
     }
 }
diff --git a/server/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java
index 2230436b18ef4..3668c7dec17a0 100644
--- a/server/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java
@@ -65,13 +65,13 @@ protected RangeQueryBuilder doCreateTestQueryBuilder() {
         switch (randomIntBetween(0, 2)) {
             case 0:
                 // use mapped integer field for numeric range queries
-                query = new RangeQueryBuilder(INT_FIELD_NAME);
+                query = new RangeQueryBuilder(randomBoolean() ? INT_FIELD_NAME : INT_RANGE_FIELD_NAME);
                 query.from(randomIntBetween(1, 100));
                 query.to(randomIntBetween(101, 200));
                 break;
             case 1:
                 // use mapped date field, using date string representation
-                query = new RangeQueryBuilder(DATE_FIELD_NAME);
+                query = new RangeQueryBuilder(randomBoolean() ? DATE_FIELD_NAME : DATE_RANGE_FIELD_NAME);
                 query.from(new DateTime(System.currentTimeMillis() - randomIntBetween(0, 1000000), DateTimeZone.UTC).toString());
                 query.to(new DateTime(System.currentTimeMillis() + randomIntBetween(0, 1000000), DateTimeZone.UTC).toString());
                 // Create timestamp option only then we have a date mapper,
@@ -99,6 +99,10 @@ protected RangeQueryBuilder doCreateTestQueryBuilder() {
         if (randomBoolean()) {
             query.to(null);
         }
+        if (query.fieldName().equals(INT_RANGE_FIELD_NAME) || query.fieldName().equals(DATE_RANGE_FIELD_NAME)) {
+            query.relation(
+                randomFrom(ShapeRelation.CONTAINS.toString(), ShapeRelation.INTERSECTS.toString(), ShapeRelation.WITHIN.toString()));
+        }
         return query;
     }
@@ -143,7 +147,9 @@ protected void doAssertLuceneQuery(RangeQueryBuilder queryBuilder, Query query,
         } else if (getCurrentTypes().length == 0 ||
             (queryBuilder.fieldName().equals(DATE_FIELD_NAME) == false
-                && queryBuilder.fieldName().equals(INT_FIELD_NAME) == false)) {
+                && queryBuilder.fieldName().equals(INT_FIELD_NAME) == false
+                && queryBuilder.fieldName().equals(DATE_RANGE_FIELD_NAME) == false
+                && queryBuilder.fieldName().equals(INT_RANGE_FIELD_NAME) == false)) {
             assertThat(query, instanceOf(TermRangeQuery.class));
             TermRangeQuery termRangeQuery = (TermRangeQuery) query;
             assertThat(termRangeQuery.getField(), equalTo(queryBuilder.fieldName()));
@@ -219,6 +225,8 @@ protected void doAssertLuceneQuery(RangeQueryBuilder queryBuilder, Query query,
                     maxInt--;
                 }
             }
+        } else if (queryBuilder.fieldName().equals(DATE_RANGE_FIELD_NAME) || queryBuilder.fieldName().equals(INT_RANGE_FIELD_NAME)) {
+            // TODO: we can't check RangeFieldQuery because it's currently package private (this will change)
         } else {
             throw new UnsupportedOperationException();
         }
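The new random branches exercise the relation option, which only applies to range-typed fields. A sketch of the query shape being generated; "my_int_range" is a hypothetical field name, not one of the test constants:

    import org.elasticsearch.index.query.RangeQueryBuilder;

    class RangeRelationSketch {
        static RangeQueryBuilder intersectingRange() {
            return new RangeQueryBuilder("my_int_range")
                .from(1)
                .to(200)
                .relation("intersects"); // or "contains" / "within", as randomized above
        }
    }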
diff --git a/server/src/test/java/org/elasticsearch/index/query/RangeQueryRewriteTests.java b/server/src/test/java/org/elasticsearch/index/query/RangeQueryRewriteTests.java
index 1a28441dc170a..95eb8a2d6325f 100644
--- a/server/src/test/java/org/elasticsearch/index/query/RangeQueryRewriteTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/RangeQueryRewriteTests.java
@@ -52,7 +52,7 @@ public void testRewriteMissingReader() throws Exception {
                 .endObject()
             .endObject().endObject().string();
         indexService.mapperService().merge("type",
-                new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false);
+                new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE);
         QueryRewriteContext context = new QueryShardContext(0, indexService.getIndexSettings(), null, null,
                 indexService.mapperService(), null, null, xContentRegistry(), writableRegistry(), null, null, null, null);
         RangeQueryBuilder range = new RangeQueryBuilder("foo");
@@ -70,7 +70,7 @@ public void testRewriteEmptyReader() throws Exception {
                 .endObject()
             .endObject().endObject().string();
         indexService.mapperService().merge("type",
-                new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false);
+                new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE);
         IndexReader reader = new MultiReader();
         QueryRewriteContext context = new QueryShardContext(0, indexService.getIndexSettings(), null, null,
                 indexService.mapperService(), null, null, xContentRegistry(), writableRegistry(), null, reader, null, null);
diff --git a/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java
index bfc6fd0600493..dc7c56ce04ebf 100644
--- a/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java
@@ -46,15 +46,18 @@
 import org.elasticsearch.test.AbstractQueryTestCase;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Locale;
 import java.util.Map;
 import java.util.Set;
 import static org.hamcrest.Matchers.anyOf;
+import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.either;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.instanceOf;
@@ -607,6 +610,21 @@ public void testToFuzzyQuery() throws Exception {
         assertEquals(expected, query);
     }
+    public void testLenientToPrefixQuery() throws Exception {
+        assumeTrue("test runs only when at least one type is registered", getCurrentTypes().length > 0);
+
+        Query query = new SimpleQueryStringBuilder("t*")
+            .field(DATE_FIELD_NAME)
+            .field(STRING_FIELD_NAME)
+            .lenient(true)
+            .toQuery(createShardContext());
+        List<Query> expectedQueries = new ArrayList<>();
+        expectedQueries.add(new MatchNoDocsQuery(""));
+        expectedQueries.add(new PrefixQuery(new Term(STRING_FIELD_NAME, "t")));
+        DisjunctionMaxQuery expected = new DisjunctionMaxQuery(expectedQueries, 1.0f);
+        assertEquals(expected, query);
+    }
+
     private static IndexMetaData newIndexMeta(String name, Settings oldIndexSettings, Settings indexSettings) {
         Settings build = Settings.builder().put(oldIndexSettings)
             .put(indexSettings)
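The essence of the new lenient test: a prefix against an unmappable (date) field degrades to a MatchNoDocsQuery instead of throwing, and is dis-maxed with the text field's PrefixQuery. A sketch of the expected Lucene query; the field name is a placeholder:

    import java.util.Arrays;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.DisjunctionMaxQuery;
    import org.apache.lucene.search.MatchNoDocsQuery;
    import org.apache.lucene.search.PrefixQuery;
    import org.apache.lucene.search.Query;

    class LenientPrefixSketch {
        static Query expected() {
            return new DisjunctionMaxQuery(
                Arrays.asList(new MatchNoDocsQuery(""), new PrefixQuery(new Term("some_text_field", "t"))),
                1.0f);
        }
    }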
diff --git a/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java
index 79f9af61408b2..c945e595213fd 100644
--- a/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java
@@ -19,12 +19,12 @@
 package org.elasticsearch.index.query;
-import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.PointInSetQuery;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.get.GetRequest;
@@ -77,9 +77,8 @@ protected TermsQueryBuilder doCreateTestQueryBuilder() {
         if (randomBoolean()) {
             // make between 0 and 5 different values of the same type
             String fieldName;
-            do {
-                fieldName = getRandomFieldName();
-            } while (fieldName.equals(GEO_POINT_FIELD_NAME) || fieldName.equals(GEO_SHAPE_FIELD_NAME));
+            fieldName = randomValueOtherThanMany(choice -> choice.equals(GEO_POINT_FIELD_NAME) || choice.equals(GEO_SHAPE_FIELD_NAME)
+                    || choice.equals(INT_RANGE_FIELD_NAME) || choice.equals(DATE_RANGE_FIELD_NAME), () -> getRandomFieldName());
             Object[] values = new Object[randomInt(5)];
             for (int i = 0; i < values.length; i++) {
                 values[i] = getRandomValueForFieldName(fieldName);
diff --git a/server/src/test/java/org/elasticsearch/index/query/TermsSetQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/TermsSetQueryBuilderTests.java
index 61336028779d9..91de39ecffff4 100644
--- a/server/src/test/java/org/elasticsearch/index/query/TermsSetQueryBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/TermsSetQueryBuilderTests.java
@@ -76,7 +76,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws
         String docType = "_doc";
         mapperService.merge(docType, new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(docType,
             "m_s_m", "type=long"
-        ).string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+        ).string()), MapperService.MergeReason.MAPPING_UPDATE);
     }
     @Override
diff --git a/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java
index e80c2df4ea060..a091cd44c4a7f 100644
--- a/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java
+++ b/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java
@@ -657,7 +657,7 @@ protected PrimaryResult performOnPrimary(
         @Override
         protected void performOnReplica(final GlobalCheckpointSyncAction.Request request, final IndexShard replica) throws IOException {
-            replica.getTranslog().sync();
+            replica.sync();
         }
     }
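The behavioural point of that one-liner: a global-checkpoint sync now goes through the shard itself rather than reaching into its translog, letting the shard decide what syncing entails. A sketch of the call-site shape; the wrapper method is hypothetical:

    import java.io.IOException;
    import org.elasticsearch.index.shard.IndexShard;

    class CheckpointSyncSketch {
        // The shard, not its translog, is the sync entry point now.
        static void onGlobalCheckpointSync(IndexShard replica) throws IOException {
            replica.sync(); // previously: replica.getTranslog().sync()
        }
    }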
diff --git a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java
index aa97c2049915f..cd948ed9f9036 100644
--- a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java
+++ b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java
@@ -304,8 +304,52 @@ public void testRecoveryAfterPrimaryPromotion() throws Exception {
             replica.store().close();
             newReplica = shards.addReplicaWithExistingPath(replica.shardPath(), replica.routingEntry().currentNodeId());
             shards.recoverReplica(newReplica);
-            shards.assertAllEqual(totalDocs);
+            // Make sure that flushing on a recovering shard is ok.
+            shards.flush();
+            shards.assertAllEqual(totalDocs);
+        }
+    }
+
+    public void testReplicaRollbackStaleDocumentsInPeerRecovery() throws Exception {
+        try (ReplicationGroup shards = createGroup(2)) {
+            shards.startAll();
+            IndexShard oldPrimary = shards.getPrimary();
+            IndexShard newPrimary = shards.getReplicas().get(0);
+            IndexShard replica = shards.getReplicas().get(1);
+            int goodDocs = shards.indexDocs(scaledRandomIntBetween(1, 20));
+            shards.flush();
+            // simulate docs that were in flight when the primary failed; these will be rolled back
+            int staleDocs = scaledRandomIntBetween(1, 10);
+            logger.info("--> indexing {} stale docs", staleDocs);
+            for (int i = 0; i < staleDocs; i++) {
+                final IndexRequest indexRequest = new IndexRequest(index.getName(), "type", "stale_" + i)
+                    .source("{}", XContentType.JSON);
+                final BulkShardRequest bulkShardRequest = indexOnPrimary(indexRequest, oldPrimary);
+                indexOnReplica(bulkShardRequest, replica);
+            }
+            shards.flush();
+            shards.promoteReplicaToPrimary(newPrimary).get();
+            // Recovering a replica should roll back the stale documents
+            shards.removeReplica(replica);
+            replica.close("recover replica - first time", false);
+            replica.store().close();
+            replica = shards.addReplicaWithExistingPath(replica.shardPath(), replica.routingEntry().currentNodeId());
+            shards.recoverReplica(replica);
+            shards.assertAllEqual(goodDocs);
+            // Index more docs and move the global checkpoint to at least the seqno of the stale operations.
+            goodDocs += shards.indexDocs(scaledRandomIntBetween(staleDocs, staleDocs * 5));
+            shards.syncGlobalCheckpoint();
+            assertThat(replica.getTranslog().getLastSyncedGlobalCheckpoint(), equalTo(replica.seqNoStats().getMaxSeqNo()));
+            // Recovering the replica again should also roll back the stale documents.
+            shards.removeReplica(replica);
+            replica.close("recover replica - second time", false);
+            replica.store().close();
+            IndexShard anotherReplica = shards.addReplicaWithExistingPath(replica.shardPath(), replica.routingEntry().currentNodeId());
+            shards.recoverReplica(anotherReplica);
+            shards.assertAllEqual(goodDocs);
+            shards.flush();
+            shards.assertAllEqual(goodDocs);
         }
     }
diff --git a/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java b/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java
index 1f033b5fb4187..ffd6c347e21e9 100644
--- a/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java
+++ b/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java
@@ -87,7 +87,7 @@ public void setup() throws IOException {
             " }\n" +
             " }\n" +
             "}";
-        mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, false);
+        mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE);
         this.indexService = indexService;
     }
diff --git a/server/src/test/java/org/elasticsearch/index/seqno/CountedBitSetTests.java b/server/src/test/java/org/elasticsearch/index/seqno/CountedBitSetTests.java
index b014f82740640..bc4f58034d1dc 100644
--- a/server/src/test/java/org/elasticsearch/index/seqno/CountedBitSetTests.java
+++ b/server/src/test/java/org/elasticsearch/index/seqno/CountedBitSetTests.java
@@ -26,9 +26,7 @@
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
-import static org.hamcrest.Matchers.allOf;
 import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.lessThan;
 public class CountedBitSetTests extends ESTestCase {
@@ -55,7 +53,6 @@ public void testReleaseInternalBitSet() {
         int numBits = (short) randomIntBetween(8, 4096);
         final CountedBitSet countedBitSet = new CountedBitSet((short) numBits);
         final List<Integer> values = IntStream.range(0, numBits).boxed().collect(Collectors.toList());
-        final long ramBytesUsedWithBitSet = countedBitSet.ramBytesUsed();
         for (int i = 1; i < numBits; i++) {
             final int value = values.get(i);
@@ -68,7 +65,6 @@ public void testReleaseInternalBitSet() {
             assertThat(countedBitSet.isInternalBitsetReleased(), equalTo(false));
             assertThat(countedBitSet.length(), equalTo(numBits));
             assertThat(countedBitSet.cardinality(), equalTo(i));
-            assertThat(countedBitSet.ramBytesUsed(), equalTo(ramBytesUsedWithBitSet));
         }
         // The missing piece to fill all bits.
@@ -83,7 +79,6 @@ public void testReleaseInternalBitSet() {
             assertThat(countedBitSet.isInternalBitsetReleased(), equalTo(true));
             assertThat(countedBitSet.length(), equalTo(numBits));
             assertThat(countedBitSet.cardinality(), equalTo(numBits));
-            assertThat(countedBitSet.ramBytesUsed(), allOf(equalTo(CountedBitSet.BASE_RAM_BYTES_USED), lessThan(ramBytesUsedWithBitSet)));
         }
         // Tests with released internal bitset.
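Context for the removed assertions: CountedBitSet releases its backing bitset once every bit is set and answers from a counter thereafter; only the ram-accounting checks were dropped, not the release behaviour. A sketch under that assumption (the helper and the size 16 are illustrative):

    import org.elasticsearch.index.seqno.CountedBitSet;

    class CountedBitSetSketch {
        static boolean releasesWhenFull() {
            CountedBitSet bits = new CountedBitSet((short) 16);
            for (int i = 0; i < 16; i++) {
                bits.set(i);
            }
            // Once all bits are set the backing bitset is dropped; queries still work.
            return bits.isInternalBitsetReleased() && bits.cardinality() == 16;
        }
    }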
diff --git a/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java b/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java
index 618714fc9d959..3fc62673de0ce 100644
--- a/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java
+++ b/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java
@@ -123,9 +123,9 @@ public void testTranslogSyncAfterGlobalCheckpointSync() throws Exception {
         }
         if (durability == Translog.Durability.ASYNC || lastSyncedGlobalCheckpoint == globalCheckpoint) {
-            verify(translog, never()).sync();
+            verify(indexShard, never()).sync();
         } else {
-            verify(translog).sync();
+            verify(indexShard).sync();
         }
     }
diff --git a/server/src/test/java/org/elasticsearch/index/seqno/LocalCheckpointTrackerTests.java b/server/src/test/java/org/elasticsearch/index/seqno/LocalCheckpointTrackerTests.java
index 31b8c23bf1c79..932fb71790800 100644
--- a/server/src/test/java/org/elasticsearch/index/seqno/LocalCheckpointTrackerTests.java
+++ b/server/src/test/java/org/elasticsearch/index/seqno/LocalCheckpointTrackerTests.java
@@ -20,7 +20,6 @@
 package org.elasticsearch.index.seqno;
 import com.carrotsearch.hppc.LongObjectHashMap;
-import org.apache.lucene.util.BitSet;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.common.Randomness;
 import org.elasticsearch.common.util.concurrent.AbstractRunnable;
@@ -260,7 +259,7 @@ public void testResetCheckpoint() {
         tracker.resetCheckpoint(localCheckpoint);
         assertThat(tracker.getCheckpoint(), equalTo((long) localCheckpoint));
         assertThat(tracker.getMaxSeqNo(), equalTo((long) maxSeqNo));
-        assertThat(tracker.processedSeqNo, new BaseMatcher<LongObjectHashMap<BitSet>>() {
+        assertThat(tracker.processedSeqNo, new BaseMatcher<LongObjectHashMap<CountedBitSet>>() {
             @Override
             public boolean matches(Object item) {
                 return (item instanceof LongObjectHashMap && ((LongObjectHashMap) item).isEmpty());
diff --git a/server/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java b/server/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java
index 9c501f6c02c05..029950fda54b9 100644
--- a/server/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java
+++ b/server/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java
@@ -353,7 +353,6 @@ public void testUpdateMappingOnAllTypes() {
         assertAcked(client().admin().indices().preparePutMapping("index")
                 .setType("type1")
-                .setUpdateAllTypes(true)
                 .setSource("f", "type=keyword,null_value=n/a")
                 .get());
diff --git a/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java
index dd4635d30f24d..f25a9234698b2 100644
--- a/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java
+++ b/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java
@@ -22,7 +22,6 @@
 import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.DocWriteResponse;
-import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
 import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
 import org.elasticsearch.action.admin.indices.stats.CommonStats;
@@ -37,6 +36,7 @@
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.index.IndexResponse;
 import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
@@ -1113,7 +1113,8 @@ public void testConcurrentIndexingAndStatsRequests() throws BrokenBarrierExcepti
         final CountDownLatch latch = new CountDownLatch(1);
         final AtomicBoolean failed = new AtomicBoolean();
-        final AtomicReference<List<ShardOperationFailedException>> shardFailures = new AtomicReference<>(new CopyOnWriteArrayList<>());
+        final AtomicReference<List<DefaultShardOperationFailedException>> shardFailures =
+            new AtomicReference<>(new CopyOnWriteArrayList<>());
         final AtomicReference<List<Exception>> executionFailures = new AtomicReference<>(new CopyOnWriteArrayList<>());
         // increasing the number of shards increases the number of chances any one stats request will hit a race
@@ -1191,7 +1192,7 @@ public void testConcurrentIndexingAndStatsRequests() throws BrokenBarrierExcepti
             thread.join();
         }
-        assertThat(shardFailures.get(), emptyCollectionOf(ShardOperationFailedException.class));
+        assertThat(shardFailures.get(), emptyCollectionOf(DefaultShardOperationFailedException.class));
         assertThat(executionFailures.get(), emptyCollectionOf(Exception.class));
     }
diff --git a/server/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java
index 148af7f7d875f..ffebd804c609c 100644
--- a/server/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java
+++ b/server/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java
@@ -19,8 +19,8 @@
 package org.elasticsearch.rest.action.cat;
-import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.RecoverySource;
 import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
@@ -110,7 +110,7 @@ public void testRestRecoveryAction() {
         Randomness.shuffle(shuffle);
         shardRecoveryStates.put("index", shuffle);
-        final List<ShardOperationFailedException> shardFailures = new ArrayList<>();
+        final List<DefaultShardOperationFailedException> shardFailures = new ArrayList<>();
         final RecoveryResponse response = new RecoveryResponse(
                 totalShards,
                 successfulShards,
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java
index 172aebbc0e5dc..0ebf957a8ddd1 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java
@@ -39,6 +39,7 @@
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.NumericUtils;
 import org.apache.lucene.util.TestUtil;
+import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexSettings;
@@ -68,6 +69,9 @@
 import java.util.function.Consumer;
 import java.util.function.Supplier;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.instanceOf;
+
 public class CompositeAggregatorTests extends AggregatorTestCase {
     private static MappedFieldType[] FIELD_TYPES;
@@ -761,6 +765,89 @@ public void testWithDateHistogram() throws IOException {
         );
     }
+    public void testWithDateHistogramAndFormat() throws IOException {
+        final List<Map<String, List<Object>>> dataset = new ArrayList<>();
+        dataset.addAll(
+            Arrays.asList(
+                createDocument("date", asLong("2017-10-20T03:08:45")),
+                createDocument("date", asLong("2016-09-20T09:00:34")),
+                createDocument("date", asLong("2016-09-20T11:34:00")),
+                createDocument("date", asLong("2017-10-20T06:09:24")),
+                createDocument("date", asLong("2017-10-19T06:09:24")),
+                createDocument("long", 4L)
+            )
+        );
+        final Sort sort = new Sort(new SortedNumericSortField("date", SortField.Type.LONG));
+        testSearchCase(new MatchAllDocsQuery(), sort, dataset,
+            () -> {
+                DateHistogramValuesSourceBuilder histo = new DateHistogramValuesSourceBuilder("date")
+                    .field("date")
+                    .dateHistogramInterval(DateHistogramInterval.days(1))
+                    .format("yyyy-MM-dd");
+                return new CompositeAggregationBuilder("name", Collections.singletonList(histo));
+            },
+            (result) -> {
+                assertEquals(3, result.getBuckets().size());
+                assertEquals("{date=2016-09-20}", result.getBuckets().get(0).getKeyAsString());
+                assertEquals(2L, result.getBuckets().get(0).getDocCount());
+                assertEquals("{date=2017-10-19}", result.getBuckets().get(1).getKeyAsString());
+                assertEquals(1L, result.getBuckets().get(1).getDocCount());
+                assertEquals("{date=2017-10-20}", result.getBuckets().get(2).getKeyAsString());
+                assertEquals(2L, result.getBuckets().get(2).getDocCount());
+            }
+        );
+
+        testSearchCase(new MatchAllDocsQuery(), sort, dataset,
+            () -> {
+                DateHistogramValuesSourceBuilder histo = new DateHistogramValuesSourceBuilder("date")
+                    .field("date")
+                    .dateHistogramInterval(DateHistogramInterval.days(1))
+                    .format("yyyy-MM-dd");
+                return new CompositeAggregationBuilder("name", Collections.singletonList(histo))
+                    .aggregateAfter(createAfterKey("date", "2016-09-20"));
+
+            }, (result) -> {
+                assertEquals(2, result.getBuckets().size());
+                assertEquals("{date=2017-10-19}", result.getBuckets().get(0).getKeyAsString());
+                assertEquals(1L, result.getBuckets().get(0).getDocCount());
+                assertEquals("{date=2017-10-20}", result.getBuckets().get(1).getKeyAsString());
+                assertEquals(2L, result.getBuckets().get(1).getDocCount());
+            }
+        );
+    }
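What the new test pins down, in isolation: a composite date_histogram source with a format renders bucket keys through that pattern, and the same pattern is used to parse the after key on a continuation request. A sketch of that builder chain (the aggregation name and helper are illustrative):

    import java.util.Collections;
    import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder;
    import org.elasticsearch.search.aggregations.bucket.composite.DateHistogramValuesSourceBuilder;
    import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;

    class CompositeFormatSketch {
        static CompositeAggregationBuilder dailyBucketsAfter(String afterKey) {
            DateHistogramValuesSourceBuilder date = new DateHistogramValuesSourceBuilder("date")
                .field("date")
                .dateHistogramInterval(DateHistogramInterval.days(1))
                .format("yyyy-MM-dd");
            return new CompositeAggregationBuilder("name", Collections.singletonList(date))
                // afterKey must be parseable with the same pattern, e.g. "2016-09-20"
                .aggregateAfter(Collections.singletonMap("date", afterKey));
        }
    }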
(result) -> {} + )); + assertThat(exc.getCause(), instanceOf(IllegalArgumentException.class)); + assertThat(exc.getCause().getMessage(), containsString("Parse failure")); + } + public void testWithDateHistogramAndTimeZone() throws IOException { final List>> dataset = new ArrayList<>(); dataset.addAll( diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/InternalCompositeTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/InternalCompositeTests.java index 10cc5b8016dc5..322b70cb2d971 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/InternalCompositeTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/InternalCompositeTests.java @@ -21,12 +21,15 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.joda.Joda; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.ParsedAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.test.InternalMultiBucketAggregationTestCase; +import org.joda.time.DateTimeZone; import org.junit.After; import java.io.IOException; @@ -41,28 +44,45 @@ import java.util.stream.Collectors; import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiLettersOfLengthBetween; -import static com.carrotsearch.randomizedtesting.RandomizedTest.randomLongBetween; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; public class InternalCompositeTests extends InternalMultiBucketAggregationTestCase { private List sourceNames; + private List formats; private int[] reverseMuls; - private int[] formats; + private int[] types; private int size; + private static DocValueFormat randomDocValueFormat(boolean isLong) { + if (isLong) { + // we use specific format only for date histogram on a long/date field + if (randomBoolean()) { + return new DocValueFormat.DateTime(Joda.forPattern("epoch_second"), DateTimeZone.forOffsetHours(1)); + } else { + return DocValueFormat.RAW; + } + } else { + // and the raw format for the other types + return DocValueFormat.RAW; + } + } + @Override public void setUp() throws Exception { super.setUp(); int numFields = randomIntBetween(1, 10); size = randomNumberOfBuckets(); sourceNames = new ArrayList<>(); + formats = new ArrayList<>(); reverseMuls = new int[numFields]; - formats = new int[numFields]; + types = new int[numFields]; for (int i = 0; i < numFields; i++) { sourceNames.add("field_" + i); reverseMuls[i] = randomBoolean() ? 1 : -1; - formats[i] = randomIntBetween(0, 2); + int type = randomIntBetween(0, 2); + types[i] = type; + formats.add(randomDocValueFormat(type == 0)); } } @@ -70,9 +90,10 @@ public void setUp() throws Exception { @After public void tearDown() throws Exception { super.tearDown(); - sourceNames= null; - reverseMuls = null; + sourceNames = null; formats = null; + reverseMuls = null; + types = null; } @Override @@ -93,7 +114,7 @@ protected
P parseAndAssert(final InternalAggregati private CompositeKey createCompositeKey() { Comparable[] keys = new Comparable[sourceNames.size()]; for (int j = 0; j < keys.length; j++) { - switch (formats[j]) { + switch (types[j]) { case 0: keys[j] = randomLong(); break; @@ -123,19 +144,6 @@ private Comparator getKeyComparator() { }; } - @SuppressWarnings("unchecked") - private Comparator getBucketComparator() { - return (o1, o2) -> { - for (int i = 0; i < o1.getRawKey().size(); i++) { - int cmp = ((Comparable) o1.getRawKey().get(i)).compareTo(o2.getRawKey().get(i)) * reverseMuls[i]; - if (cmp != 0) { - return cmp; - } - } - return 0; - }; - } - @Override protected InternalComposite createTestInstance(String name, List pipelineAggregators, Map metaData, InternalAggregations aggregations) { @@ -149,11 +157,11 @@ protected InternalComposite createTestInstance(String name, List o1.compareKey(o2)); - return new InternalComposite(name, size, sourceNames, buckets, reverseMuls, Collections.emptyList(), metaData); + return new InternalComposite(name, size, sourceNames, formats, buckets, reverseMuls, Collections.emptyList(), metaData); } @Override @@ -172,7 +180,7 @@ protected InternalComposite mutateInstance(InternalComposite instance) throws IO break; case 1: buckets = new ArrayList<>(buckets); - buckets.add(new InternalComposite.InternalBucket(sourceNames, createCompositeKey(), reverseMuls, + buckets.add(new InternalComposite.InternalBucket(sourceNames, formats, createCompositeKey(), reverseMuls, randomLongBetween(1, 100), InternalAggregations.EMPTY) ); break; @@ -187,7 +195,7 @@ protected InternalComposite mutateInstance(InternalComposite instance) throws IO default: throw new AssertionError("illegal branch"); } - return new InternalComposite(instance.getName(), instance.getSize(), sourceNames, buckets, reverseMuls, + return new InternalComposite(instance.getName(), instance.getSize(), sourceNames, formats, buckets, reverseMuls, instance.pipelineAggregators(), metaData); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregatorTests.java index 10b306ad7177c..144305647ebaf 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregatorTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.aggregations.metrics; import org.apache.lucene.document.Document; +import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; @@ -38,6 +39,8 @@ import java.io.IOException; import java.util.function.Consumer; +import static java.util.Collections.singleton; + public class ExtendedStatsAggregatorTests extends AggregatorTestCase { private static final double TOLERANCE = 1e-5; @@ -132,6 +135,68 @@ public void testRandomLongs() throws IOException { ); } + public void testSummationAccuracy() throws IOException { + double[] values = new double[]{0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.9, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7}; + verifyStatsOfDoubles(values, 13.5, 16.21, 0d); + + // Summing up an array which contains NaN and infinities and expect a result same as naive summation + int n = randomIntBetween(5, 10); + values = new double[n]; + double sum = 0; + double sumOfSqrs = 0; + 
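            // The loop below builds a naive reference sum. The aggregation itself is
            // expected to use compensated (Kahan) summation, which is why the
            // well-conditioned case above demands an exact 13.5 while this mixed
            // NaN/infinity case only has to match the naive result: once a NaN or an
            // infinity enters the stream, compensation contributes nothing. A sketch
            // of the compensated scheme (illustrative only, not the production code):
            //
            //   double kahanSum = 0, correction = 0;
            //   for (double v : values) {
            //       double corrected = v - correction;      // re-apply the lost low-order bits
            //       double next = kahanSum + corrected;
            //       correction = (next - kahanSum) - corrected;
            //       kahanSum = next;
            //   }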
for (int i = 0; i < n; i++) { + values[i] = frequently() + ? randomFrom(Double.NaN, Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY) + : randomDoubleBetween(Double.MIN_VALUE, Double.MAX_VALUE, true); + sum += values[i]; + sumOfSqrs += values[i] * values[i]; + } + verifyStatsOfDoubles(values, sum, sumOfSqrs, TOLERANCE); + + // Summing up some big double values and expect infinity result + n = randomIntBetween(5, 10); + double[] largeValues = new double[n]; + for (int i = 0; i < n; i++) { + largeValues[i] = Double.MAX_VALUE; + } + verifyStatsOfDoubles(largeValues, Double.POSITIVE_INFINITY, Double.POSITIVE_INFINITY, 0d); + + for (int i = 0; i < n; i++) { + largeValues[i] = -Double.MAX_VALUE; + } + verifyStatsOfDoubles(largeValues, Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, 0d); + } + + private void verifyStatsOfDoubles(double[] values, double expectedSum, + double expectedSumOfSqrs, double delta) throws IOException { + MappedFieldType ft = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.DOUBLE); + final String fieldName = "field"; + ft.setName(fieldName); + double max = Double.NEGATIVE_INFINITY; + double min = Double.POSITIVE_INFINITY; + for (double value : values) { + max = Math.max(max, value); + min = Math.min(min, value); + } + double expectedMax = max; + double expectedMin = min; + testCase(ft, + iw -> { + for (double value : values) { + iw.addDocument(singleton(new NumericDocValuesField(fieldName, NumericUtils.doubleToSortableLong(value)))); + } + }, + stats -> { + assertEquals(values.length, stats.getCount()); + assertEquals(expectedSum / values.length, stats.getAvg(), delta); + assertEquals(expectedSum, stats.getSum(), delta); + assertEquals(expectedSumOfSqrs, stats.getSumOfSquares(), delta); + assertEquals(expectedMax, stats.getMax(), 0d); + assertEquals(expectedMin, stats.getMin(), 0d); + } + ); + } + public void testCase(MappedFieldType ft, CheckedConsumer buildIndex, Consumer verify) throws IOException { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalExtendedStatsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalExtendedStatsTests.java index 143ad4553c7dd..6178a72c83e3e 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalExtendedStatsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalExtendedStatsTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.ParsedAggregation; import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats.Bounds; import org.elasticsearch.search.aggregations.metrics.stats.extended.InternalExtendedStats; @@ -28,6 +29,7 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.test.InternalAggregationTestCase; +import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -188,4 +190,44 @@ protected InternalExtendedStats mutateInstance(InternalExtendedStats instance) { } return new InternalExtendedStats(name, count, sum, min, max, sumOfSqrs, sigma, formatter, pipelineAggregators, metaData); } + + public void testSummationAccuracy() { + double[] values = new double[]{0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.9, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7}; + verifySumOfSqrsOfDoubles(values, 13.5, 0d); + + 
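        // Here each array element plays the role of one shard-level sumOfSqrs partial:
        // the verifySumOfSqrsOfDoubles helper below wraps every value in its own
        // InternalExtendedStats and reduces them. The 15 partials above sum to exactly
        // 13.5 in real arithmetic ((0.1 + 1.7) * 17 / 2 = 15.3, minus the skipped 0.8
        // and 1.0), so a delta of 0d demands that the reduce phase lose no precision.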
int n = randomIntBetween(5, 10); + values = new double[n]; + double sum = 0; + for (int i = 0; i < n; i++) { + values[i] = frequently() + ? randomFrom(Double.NaN, Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY) + : randomDoubleBetween(Double.MIN_VALUE, Double.MAX_VALUE, true); + sum += values[i]; + } + verifySumOfSqrsOfDoubles(values, sum, TOLERANCE); + + // Summing up some big double values and expect infinity result + n = randomIntBetween(5, 10); + double[] largeValues = new double[n]; + for (int i = 0; i < n; i++) { + largeValues[i] = Double.MAX_VALUE; + } + verifySumOfSqrsOfDoubles(largeValues, Double.POSITIVE_INFINITY, 0d); + + for (int i = 0; i < n; i++) { + largeValues[i] = -Double.MAX_VALUE; + } + verifySumOfSqrsOfDoubles(largeValues, Double.NEGATIVE_INFINITY, 0d); + } + + private void verifySumOfSqrsOfDoubles(double[] values, double expectedSumOfSqrs, double delta) { + List aggregations = new ArrayList<>(values.length); + double sigma = randomDouble(); + for (double sumOfSqrs : values) { + aggregations.add(new InternalExtendedStats("dummy1", 1, 0.0, 0.0, 0.0, sumOfSqrs, sigma, null, null, null)); + } + InternalExtendedStats stats = new InternalExtendedStats("dummy", 1, 0.0, 0.0, 0.0, 0.0, sigma, null, null, null); + InternalExtendedStats reduced = stats.doReduce(aggregations, null); + assertEquals(expectedSumOfSqrs, reduced.getSumOfSquares(), delta); + } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalStatsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalStatsTests.java index 4ce29e4e0ed83..369fd671eb93c 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalStatsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalStatsTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.ParsedAggregation; import org.elasticsearch.search.aggregations.metrics.stats.InternalStats; import org.elasticsearch.search.aggregations.metrics.stats.ParsedStats; @@ -30,6 +31,7 @@ import org.elasticsearch.test.InternalAggregationTestCase; import java.io.IOException; +import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -48,7 +50,7 @@ protected InternalStats createTestInstance(String name, List } protected InternalStats createInstance(String name, long count, double sum, double min, double max, DocValueFormat formatter, - List pipelineAggregators, Map metaData) { + List pipelineAggregators, Map metaData) { return new InternalStats(name, count, sum, min, max, formatter, pipelineAggregators, metaData); } @@ -74,6 +76,54 @@ protected void assertReduced(InternalStats reduced, List inputs) assertEquals(expectedMax, reduced.getMax(), 0d); } + public void testSummationAccuracy() { + double[] values = new double[]{0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.9, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7}; + verifyStatsOfDoubles(values, 13.5, 0.9, 0d); + + int n = randomIntBetween(5, 10); + values = new double[n]; + double sum = 0; + for (int i = 0; i < n; i++) { + values[i] = frequently() + ? 
randomFrom(Double.NaN, Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY) + : randomDoubleBetween(Double.MIN_VALUE, Double.MAX_VALUE, true); + sum += values[i]; + } + verifyStatsOfDoubles(values, sum, sum / n, TOLERANCE); + + // Summing up some big double values and expect infinity result + n = randomIntBetween(5, 10); + double[] largeValues = new double[n]; + for (int i = 0; i < n; i++) { + largeValues[i] = Double.MAX_VALUE; + } + verifyStatsOfDoubles(largeValues, Double.POSITIVE_INFINITY, Double.POSITIVE_INFINITY, 0d); + + for (int i = 0; i < n; i++) { + largeValues[i] = -Double.MAX_VALUE; + } + verifyStatsOfDoubles(largeValues, Double.NEGATIVE_INFINITY, Double.NEGATIVE_INFINITY, 0d); + } + + private void verifyStatsOfDoubles(double[] values, double expectedSum, double expectedAvg, double delta) { + List aggregations = new ArrayList<>(values.length); + double max = Double.NEGATIVE_INFINITY; + double min = Double.POSITIVE_INFINITY; + for (double value : values) { + max = Math.max(max, value); + min = Math.min(min, value); + aggregations.add(new InternalStats("dummy1", 1, value, value, value, null, null, null)); + } + InternalStats internalStats = new InternalStats("dummy2", 0, 0.0, 2.0, 0.0, null, null, null); + InternalStats reduced = internalStats.doReduce(aggregations, null); + assertEquals("dummy2", reduced.getName()); + assertEquals(values.length, reduced.getCount()); + assertEquals(expectedSum, reduced.getSum(), delta); + assertEquals(expectedAvg, reduced.getAvg(), delta); + assertEquals(min, reduced.getMin(), 0d); + assertEquals(max, reduced.getMax(), 0d); + } + @Override protected void assertFromXContent(InternalStats aggregation, ParsedAggregation parsedAggregation) { assertTrue(parsedAggregation instanceof ParsedStats); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalSumTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalSumTests.java index feeefac4daa55..884f9bfbe0d20 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalSumTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalSumTests.java @@ -20,12 +20,14 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.ParsedAggregation; import org.elasticsearch.search.aggregations.metrics.sum.InternalSum; import org.elasticsearch.search.aggregations.metrics.sum.ParsedSum; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.test.InternalAggregationTestCase; +import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -34,7 +36,7 @@ public class InternalSumTests extends InternalAggregationTestCase { @Override protected InternalSum createTestInstance(String name, List pipelineAggregators, Map metaData) { - double value = frequently() ? randomDouble() : randomFrom(new Double[] { Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY }); + double value = frequently() ? 
randomDouble() : randomFrom(Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, Double.NaN); DocValueFormat formatter = randomFrom(new DocValueFormat.Decimal("###.##"), DocValueFormat.BOOLEAN, DocValueFormat.RAW); return new InternalSum(name, value, formatter, pipelineAggregators, metaData); } @@ -50,6 +52,47 @@ protected void assertReduced(InternalSum reduced, List inputs) { assertEquals(expectedSum, reduced.getValue(), 0.0001d); } + public void testSummationAccuracy() { + // Summing up a normal array and expect an accurate value + double[] values = new double[]{0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.9, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7}; + verifySummationOfDoubles(values, 13.5, 0d); + + // Summing up an array which contains NaN and infinities and expect a result same as naive summation + int n = randomIntBetween(5, 10); + values = new double[n]; + double sum = 0; + for (int i = 0; i < n; i++) { + values[i] = frequently() + ? randomFrom(Double.NaN, Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY) + : randomDoubleBetween(Double.MIN_VALUE, Double.MAX_VALUE, true); + sum += values[i]; + } + verifySummationOfDoubles(values, sum, TOLERANCE); + + // Summing up some big double values and expect infinity result + n = randomIntBetween(5, 10); + double[] largeValues = new double[n]; + for (int i = 0; i < n; i++) { + largeValues[i] = Double.MAX_VALUE; + } + verifySummationOfDoubles(largeValues, Double.POSITIVE_INFINITY, 0d); + + for (int i = 0; i < n; i++) { + largeValues[i] = -Double.MAX_VALUE; + } + verifySummationOfDoubles(largeValues, Double.NEGATIVE_INFINITY, 0d); + } + + private void verifySummationOfDoubles(double[] values, double expected, double delta) { + List aggregations = new ArrayList<>(values.length); + for (double value : values) { + aggregations.add(new InternalSum("dummy1", value, null, null, null)); + } + InternalSum internalSum = new InternalSum("dummy", 0, null, null, null); + InternalSum reduced = internalSum.doReduce(aggregations, null); + assertEquals(expected, reduced.value(), delta); + } + @Override protected void assertFromXContent(InternalSum sum, ParsedAggregation parsedAggregation) { ParsedSum parsed = ((ParsedSum) parsedAggregation); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsAggregatorTests.java index 7286c7de0fed5..c5c1420fb2265 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsAggregatorTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.metrics; import org.apache.lucene.document.Document; +import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; @@ -36,6 +37,8 @@ import java.io.IOException; import java.util.function.Consumer; +import static java.util.Collections.singleton; + public class StatsAggregatorTests extends AggregatorTestCase { static final double TOLERANCE = 1e-10; @@ -113,6 +116,66 @@ public void testRandomLongs() throws IOException { ); } + public void testSummationAccuracy() throws IOException { + // Summing up a normal array and expect an accurate value + double[] values = new double[]{0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7}; + verifySummationOfDoubles(values, 
15.3, 0.9, 0d); + + // Summing up an array which contains NaN and infinities and expect a result same as naive summation + int n = randomIntBetween(5, 10); + values = new double[n]; + double sum = 0; + for (int i = 0; i < n; i++) { + values[i] = frequently() + ? randomFrom(Double.NaN, Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY) + : randomDoubleBetween(Double.MIN_VALUE, Double.MAX_VALUE, true); + sum += values[i]; + } + verifySummationOfDoubles(values, sum, sum / n, TOLERANCE); + + // Summing up some big double values and expect infinity result + n = randomIntBetween(5, 10); + double[] largeValues = new double[n]; + for (int i = 0; i < n; i++) { + largeValues[i] = Double.MAX_VALUE; + } + verifySummationOfDoubles(largeValues, Double.POSITIVE_INFINITY, Double.POSITIVE_INFINITY, 0d); + + for (int i = 0; i < n; i++) { + largeValues[i] = -Double.MAX_VALUE; + } + verifySummationOfDoubles(largeValues, Double.NEGATIVE_INFINITY, Double.NEGATIVE_INFINITY, 0d); + } + + private void verifySummationOfDoubles(double[] values, double expectedSum, + double expectedAvg, double delta) throws IOException { + MappedFieldType ft = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.DOUBLE); + ft.setName("field"); + + double max = Double.NEGATIVE_INFINITY; + double min = Double.POSITIVE_INFINITY; + for (double value : values) { + max = Math.max(max, value); + min = Math.min(min, value); + } + double expectedMax = max; + double expectedMin = min; + testCase(ft, + iw -> { + for (double value : values) { + iw.addDocument(singleton(new NumericDocValuesField("field", NumericUtils.doubleToSortableLong(value)))); + } + }, + stats -> { + assertEquals(values.length, stats.getCount()); + assertEquals(expectedAvg, stats.getAvg(), delta); + assertEquals(expectedSum, stats.getSum(), delta); + assertEquals(expectedMax, stats.getMax(), 0d); + assertEquals(expectedMin, stats.getMin(), 0d); + } + ); + } + public void testCase(MappedFieldType ft, CheckedConsumer buildIndex, Consumer verify) throws IOException { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/SumAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/SumAggregatorTests.java index ff9888a4981d3..edaf5ae03f99b 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/SumAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/SumAggregatorTests.java @@ -34,6 +34,7 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.NumericUtils; import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; @@ -107,7 +108,7 @@ public void testQueryFiltering() throws IOException { } public void testStringField() throws IOException { - IllegalStateException e = expectThrows(IllegalStateException.class , () -> { + IllegalStateException e = expectThrows(IllegalStateException.class, () -> { testCase(new MatchAllDocsQuery(), iw -> { iw.addDocument(singleton(new SortedDocValuesField(FIELD_NAME, new BytesRef("1")))); }, count -> assertEquals(0L, count.getValue(), 0d)); @@ -116,10 +117,59 @@ public void testStringField() throws IOException { "Re-index with correct docvalues type.", e.getMessage()); } + public void testSummationAccuracy() throws IOException { + // Summing up a normal array and expect an accurate value + double[] values = new 
double[]{0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7}; + verifySummationOfDoubles(values, 15.3, 0d); + + // Summing up an array which contains NaN and infinities and expect a result same as naive summation + int n = randomIntBetween(5, 10); + values = new double[n]; + double sum = 0; + for (int i = 0; i < n; i++) { + values[i] = frequently() + ? randomFrom(Double.NaN, Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY) + : randomDoubleBetween(Double.MIN_VALUE, Double.MAX_VALUE, true); + sum += values[i]; + } + verifySummationOfDoubles(values, sum, 1e-10); + + // Summing up some big double values and expect infinity result + n = randomIntBetween(5, 10); + double[] largeValues = new double[n]; + for (int i = 0; i < n; i++) { + largeValues[i] = Double.MAX_VALUE; + } + verifySummationOfDoubles(largeValues, Double.POSITIVE_INFINITY, 0d); + + for (int i = 0; i < n; i++) { + largeValues[i] = -Double.MAX_VALUE; + } + verifySummationOfDoubles(largeValues, Double.NEGATIVE_INFINITY, 0d); + } + + private void verifySummationOfDoubles(double[] values, double expected, double delta) throws IOException { + testCase(new MatchAllDocsQuery(), + iw -> { + for (double value : values) { + iw.addDocument(singleton(new NumericDocValuesField(FIELD_NAME, NumericUtils.doubleToSortableLong(value)))); + } + }, + result -> assertEquals(expected, result.getValue(), delta), + NumberFieldMapper.NumberType.DOUBLE + ); + } + private void testCase(Query query, CheckedConsumer indexer, Consumer verify) throws IOException { + testCase(query, indexer, verify, NumberFieldMapper.NumberType.LONG); + } + private void testCase(Query query, + CheckedConsumer indexer, + Consumer verify, + NumberFieldMapper.NumberType fieldNumberType) throws IOException { try (Directory directory = newDirectory()) { try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { indexer.accept(indexWriter); @@ -128,7 +178,7 @@ private void testCase(Query query, try (IndexReader indexReader = DirectoryReader.open(directory)) { IndexSearcher indexSearcher = newSearcher(indexReader, true, true); - MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG); + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(fieldNumberType); fieldType.setName(FIELD_NAME); fieldType.setHasDocValues(true); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregatorTests.java index 2849ede447b60..7835bf75e721f 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregatorTests.java @@ -30,13 +30,11 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; +import org.apache.lucene.util.NumericUtils; import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregatorTestCase; -import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregator; -import org.elasticsearch.search.aggregations.metrics.avg.InternalAvg; import java.io.IOException; import java.util.Arrays; @@ -103,8 +101,59 @@ 
public void testQueryFiltersAll() throws IOException { }); } - private void testCase(Query query, CheckedConsumer buildIndex, Consumer verify) - throws IOException { + public void testSummationAccuracy() throws IOException { + // Summing up a normal array and expect an accurate value + double[] values = new double[]{0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.9, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7}; + verifyAvgOfDoubles(values, 0.9, 0d); + + // Summing up an array which contains NaN and infinities and expect a result same as naive summation + int n = randomIntBetween(5, 10); + values = new double[n]; + double sum = 0; + for (int i = 0; i < n; i++) { + values[i] = frequently() + ? randomFrom(Double.NaN, Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY) + : randomDoubleBetween(Double.MIN_VALUE, Double.MAX_VALUE, true); + sum += values[i]; + } + verifyAvgOfDoubles(values, sum / n, 1e-10); + + // Summing up some big double values and expect infinity result + n = randomIntBetween(5, 10); + double[] largeValues = new double[n]; + for (int i = 0; i < n; i++) { + largeValues[i] = Double.MAX_VALUE; + } + verifyAvgOfDoubles(largeValues, Double.POSITIVE_INFINITY, 0d); + + for (int i = 0; i < n; i++) { + largeValues[i] = -Double.MAX_VALUE; + } + verifyAvgOfDoubles(largeValues, Double.NEGATIVE_INFINITY, 0d); + } + + private void verifyAvgOfDoubles(double[] values, double expected, double delta) throws IOException { + testCase(new MatchAllDocsQuery(), + iw -> { + for (double value : values) { + iw.addDocument(singleton(new NumericDocValuesField("number", NumericUtils.doubleToSortableLong(value)))); + } + }, + avg -> assertEquals(expected, avg.getValue(), delta), + NumberFieldMapper.NumberType.DOUBLE + ); + } + + private void testCase(Query query, + CheckedConsumer buildIndex, + Consumer verify) throws IOException { + testCase(query, buildIndex, verify, NumberFieldMapper.NumberType.LONG); + } + + private void testCase(Query query, + CheckedConsumer buildIndex, + Consumer verify, + NumberFieldMapper.NumberType fieldNumberType) throws IOException { Directory directory = newDirectory(); RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); buildIndex.accept(indexWriter); @@ -114,7 +163,7 @@ private void testCase(Query query, CheckedConsumer inputs) { assertEquals(sum / counts, reduced.value(), 0.0000001); } + public void testSummationAccuracy() { + double[] values = new double[]{0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.9, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7}; + verifyAvgOfDoubles(values, 0.9, 0d); + + int n = randomIntBetween(5, 10); + values = new double[n]; + double sum = 0; + for (int i = 0; i < n; i++) { + values[i] = frequently() + ? 
randomFrom(Double.NaN, Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY) + : randomDoubleBetween(Double.MIN_VALUE, Double.MAX_VALUE, true); + sum += values[i]; + } + verifyAvgOfDoubles(values, sum / n, TOLERANCE); + + // Summing up some big double values and expect infinity result + n = randomIntBetween(5, 10); + double[] largeValues = new double[n]; + for (int i = 0; i < n; i++) { + largeValues[i] = Double.MAX_VALUE; + } + verifyAvgOfDoubles(largeValues, Double.POSITIVE_INFINITY, 0d); + + for (int i = 0; i < n; i++) { + largeValues[i] = -Double.MAX_VALUE; + } + verifyAvgOfDoubles(largeValues, Double.NEGATIVE_INFINITY, 0d); + } + + private void verifyAvgOfDoubles(double[] values, double expected, double delta) { + List aggregations = new ArrayList<>(values.length); + for (double value : values) { + aggregations.add(new InternalAvg("dummy1", value, 1, null, null, null)); + } + InternalAvg internalAvg = new InternalAvg("dummy2", 0, 0, null, null, null); + InternalAvg reduced = internalAvg.doReduce(aggregations, null); + assertEquals(expected, reduced.getValue(), delta); + } + @Override protected void assertFromXContent(InternalAvg avg, ParsedAggregation parsedAggregation) { ParsedAvg parsed = ((ParsedAvg) parsedAggregation); diff --git a/server/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingIT.java b/server/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingIT.java index cd15c9668348b..36d672e40f278 100644 --- a/server/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingIT.java +++ b/server/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingIT.java @@ -129,7 +129,6 @@ public void testChangingEagerParentFieldLoadingAtRuntime() throws Exception { PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("child") .setSource(childMapping(true)) - .setUpdateAllTypes(true) .get(); assertAcked(putMappingResponse); Index test = resolveIndex("test"); diff --git a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java index 4b56d2bc9e1fe..c3f1da82c7984 100644 --- a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -36,6 +36,7 @@ import org.elasticsearch.index.query.MultiMatchQueryBuilder; import org.elasticsearch.index.query.Operator; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.index.query.WrapperQueryBuilder; import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders; @@ -1893,4 +1894,17 @@ public void testQueryStringParserCache() throws Exception { } } + public void testRangeQueryRangeFields_24744() throws Exception { + assertAcked(prepareCreate("test").addMapping("type1", "int_range", "type=integer_range")); + + client().prepareIndex("test", "type1", "1") + .setSource(jsonBuilder().startObject().startObject("int_range").field("gte", 10).field("lte", 20).endObject().endObject()) + .get(); + refresh(); + + RangeQueryBuilder range = new RangeQueryBuilder("int_range").relation("intersects").from(Integer.MIN_VALUE).to(Integer.MAX_VALUE); + SearchResponse searchResponse = client().prepareSearch("test").setQuery(range).get(); + assertHitCount(searchResponse, 1); + } + } diff --git a/settings.gradle b/settings.gradle index b844af52df76b..e3a24ea148d95 100644 --- 
a/settings.gradle +++ b/settings.gradle @@ -27,113 +27,9 @@ List projects = [ 'test:fixtures:hdfs-fixture', 'test:fixtures:krb5kdc-fixture', 'test:fixtures:old-elasticsearch', - 'test:logger-usage', - 'libs:elasticsearch-core', - 'libs:elasticsearch-nio', - 'modules:aggs-matrix-stats', - 'modules:analysis-common', - 'modules:ingest-common', - 'modules:lang-expression', - 'modules:lang-mustache', - 'modules:lang-painless', - 'modules:mapper-extras', - 'modules:parent-join', - 'modules:percolator', - 'modules:rank-eval', - 'modules:reindex', - 'modules:repository-url', - 'modules:transport-netty4', - 'modules:tribe', - 'plugins:analysis-icu', - 'plugins:analysis-kuromoji', - 'plugins:analysis-phonetic', - 'plugins:analysis-smartcn', - 'plugins:analysis-stempel', - 'plugins:analysis-ukrainian', - 'plugins:discovery-azure-classic', - 'plugins:discovery-ec2', - 'plugins:discovery-file', - 'plugins:discovery-gce', - 'plugins:ingest-geoip', - 'plugins:ingest-attachment', - 'plugins:ingest-user-agent', - 'plugins:mapper-murmur3', - 'plugins:mapper-size', - 'plugins:repository-azure', - 'plugins:repository-gcs', - 'plugins:repository-hdfs', - 'plugins:repository-s3', - 'plugins:jvm-example', - 'plugins:store-smb', - 'plugins:transport-nio', - 'qa:auto-create-index', - 'qa:ccs-unavailable-clusters', - 'qa:evil-tests', - 'qa:full-cluster-restart', - 'qa:integration-bwc', - 'qa:mixed-cluster', - 'qa:multi-cluster-search', - 'qa:no-bootstrap-tests', - 'qa:reindex-from-old', - 'qa:rolling-upgrade', - 'qa:smoke-test-client', - 'qa:smoke-test-http', - 'qa:smoke-test-ingest-with-all-dependencies', - 'qa:smoke-test-ingest-disabled', - 'qa:smoke-test-multinode', - 'qa:smoke-test-rank-eval-with-mustache', - 'qa:smoke-test-plugins', - 'qa:smoke-test-reindex-with-all-modules', - 'qa:smoke-test-tribe-node', - 'qa:vagrant', - 'qa:verify-version-constants', - 'qa:wildfly', - 'qa:query-builder-bwc' + 'test:logger-usage' ] -projects.add("libs") -File libsDir = new File(rootProject.projectDir, 'libs') -for (File libDir : new File(rootProject.projectDir, 'libs').listFiles()) { - if (libDir.isDirectory() == false) continue; - if (libDir.name.startsWith('build') || libDir.name.startsWith('.')) continue; - projects.add("libs:${libDir.name}".toString()) -} - -/* Create projects for building BWC snapshot distributions from the heads of other branches */ -final List branches = ['5.6', '6.0', '6.1', '6.x'] -for (final String branch : branches) { - projects.add("distribution:bwc-snapshot-${branch}".toString()) -} - -boolean isEclipse = System.getProperty("eclipse.launcher") != null || gradle.startParameter.taskNames.contains('eclipse') || gradle.startParameter.taskNames.contains('cleanEclipse') -if (isEclipse) { - // eclipse cannot handle an intermediate dependency between main and test, so we must create separate projects - // for server-src and server-tests - projects << 'server-tests' - projects << 'libs:elasticsearch-nio-tests' -} - -include projects.toArray(new String[0]) - -project(':build-tools').projectDir = new File(rootProject.projectDir, 'buildSrc') - -/* The BWC snapshot projects share the same build directory and build file, - * but apply to different backwards compatibility branches. 
*/ -for (final String branch : branches) { - project(":distribution:bwc-snapshot-${branch}").projectDir = new File(rootProject.projectDir, 'distribution/bwc') -} - -if (isEclipse) { - project(":server").projectDir = new File(rootProject.projectDir, 'server/src/main') - project(":server").buildFileName = 'eclipse-build.gradle' - project(":server-tests").projectDir = new File(rootProject.projectDir, 'server/src/test') - project(":server-tests").buildFileName = 'eclipse-build.gradle' - project(":libs:elasticsearch-nio").projectDir = new File(rootProject.projectDir, 'libs/elasticsearch-nio/src/main') - project(":libs:elasticsearch-nio").buildFileName = 'eclipse-build.gradle' - project(":libs:elasticsearch-nio-tests").projectDir = new File(rootProject.projectDir, 'libs/elasticsearch-nio/src/test') - project(":libs:elasticsearch-nio-tests").buildFileName = 'eclipse-build.gradle' -} - /** * Iterates over sub directories, looking for build.gradle, and adds a project if found * for that dir with the given path prefix. Note that this requires each level @@ -144,6 +40,7 @@ void addSubProjects(String path, File dir, List projects, List b if (dir.isDirectory() == false) return; if (dir.name == 'buildSrc') return; if (new File(dir, 'build.gradle').exists() == false) return; + if (findProject(dir) != null) return; final String projectName = "${path}:${dir.name}" include projectName @@ -166,7 +63,7 @@ void addSubProjects(String path, File dir, List projects, List b } } -// include example plugins +// include example plugins first, so adding plugin dirs below won't muck with :example-plugins File examplePluginsDir = new File(rootProject.projectDir, 'plugins/examples') for (File example : examplePluginsDir.listFiles()) { if (example.isDirectory() == false) continue; @@ -175,6 +72,51 @@ for (File example : examplePluginsDir.listFiles()) { } project(':example-plugins').projectDir = new File(rootProject.projectDir, 'plugins/examples') +addSubProjects('', new File(rootProject.projectDir, 'libs'), projects, []) +addSubProjects('', new File(rootProject.projectDir, 'modules'), projects, []) +addSubProjects('', new File(rootProject.projectDir, 'plugins'), projects, []) +addSubProjects('', new File(rootProject.projectDir, 'qa'), projects, []) + +/* Create projects for building BWC snapshot distributions from the heads of other branches */ +final List branches = ['5.6', '6.0', '6.1', '6.2', '6.x'] +for (final String branch : branches) { + projects.add("distribution:bwc-snapshot-${branch}".toString()) +} + +boolean isEclipse = System.getProperty("eclipse.launcher") != null || gradle.startParameter.taskNames.contains('eclipse') || gradle.startParameter.taskNames.contains('cleanEclipse') +if (isEclipse) { + // eclipse cannot handle an intermediate dependency between main and test, so we must create separate projects + // for server-src and server-tests + projects << 'server-tests' + projects << 'libs:elasticsearch-core-tests' + projects << 'libs:elasticsearch-nio-tests' +} + +include projects.toArray(new String[0]) + +project(':build-tools').projectDir = new File(rootProject.projectDir, 'buildSrc') + +/* The BWC snapshot projects share the same build directory and build file, + * but apply to different backwards compatibility branches. 
*/ +for (final String branch : branches) { + project(":distribution:bwc-snapshot-${branch}").projectDir = new File(rootProject.projectDir, 'distribution/bwc') +} + +if (isEclipse) { + project(":server").projectDir = new File(rootProject.projectDir, 'server/src/main') + project(":server").buildFileName = 'eclipse-build.gradle' + project(":server-tests").projectDir = new File(rootProject.projectDir, 'server/src/test') + project(":server-tests").buildFileName = 'eclipse-build.gradle' + project(":libs:elasticsearch-core").projectDir = new File(rootProject.projectDir, 'libs/elasticsearch-core/src/main') + project(":libs:elasticsearch-core").buildFileName = 'eclipse-build.gradle' + project(":libs:elasticsearch-core-tests").projectDir = new File(rootProject.projectDir, 'libs/elasticsearch-core/src/test') + project(":libs:elasticsearch-core-tests").buildFileName = 'eclipse-build.gradle' + project(":libs:elasticsearch-nio").projectDir = new File(rootProject.projectDir, 'libs/elasticsearch-nio/src/main') + project(":libs:elasticsearch-nio").buildFileName = 'eclipse-build.gradle' + project(":libs:elasticsearch-nio-tests").projectDir = new File(rootProject.projectDir, 'libs/elasticsearch-nio/src/test') + project(":libs:elasticsearch-nio-tests").buildFileName = 'eclipse-build.gradle' +} + // look for extra plugins for elasticsearch File extraProjects = new File(rootProject.projectDir.parentFile, "${dirName}-extra") if (extraProjects.exists()) { @@ -182,5 +124,4 @@ if (extraProjects.exists()) { addSubProjects('', extraProjectDir, projects, branches) } } -include 'libs' diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java b/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java index 6834d124c499a..4bdd9b84ec463 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java @@ -80,7 +80,7 @@ private void applyOperation(Engine engine, Engine.Operation operation) throws IO Engine.Index engineIndex = (Engine.Index) operation; Mapping update = engineIndex.parsedDoc().dynamicMappingsUpdate(); if (engineIndex.parsedDoc().dynamicMappingsUpdate() != null) { - recoveredTypes.compute(engineIndex.type(), (k, mapping) -> mapping == null ? update : mapping.merge(update, false)); + recoveredTypes.compute(engineIndex.type(), (k, mapping) -> mapping == null ? update : mapping.merge(update)); } engine.index(engineIndex); break; diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java index 2b6f4c38a902b..818594d3bf7fd 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java @@ -42,12 +42,12 @@ public abstract class FieldTypeTestCase extends ESTestCase { public abstract static class Modifier { /** The name of the property that is being modified. Used in test failure messages. */ public final String property; - /** true if this modifier only makes types incompatible in strict mode, false otherwise */ - public final boolean strictOnly; + /** True if this property is updateable, false otherwise. 
*/ + public final boolean updateable; - public Modifier(String property, boolean strictOnly) { + public Modifier(String property, boolean updateable) { this.property = property; - this.strictOnly = strictOnly; + this.updateable = updateable; } /** Modifies the property */ @@ -189,16 +189,16 @@ protected void assertFieldTypeNotEquals(String property, MappedFieldType ft1, Ma } } - protected void assertCompatible(String msg, MappedFieldType ft1, MappedFieldType ft2, boolean strict) { + protected void assertCompatible(String msg, MappedFieldType ft1, MappedFieldType ft2) { List conflicts = new ArrayList<>(); - ft1.checkCompatibility(ft2, conflicts, strict); + ft1.checkCompatibility(ft2, conflicts); assertTrue("Found conflicts for " + msg + ": " + conflicts, conflicts.isEmpty()); } - protected void assertNotCompatible(String msg, MappedFieldType ft1, MappedFieldType ft2, boolean strict, String... messages) { + protected void assertNotCompatible(String msg, MappedFieldType ft1, MappedFieldType ft2, String... messages) { assert messages.length != 0; List conflicts = new ArrayList<>(); - ft1.checkCompatibility(ft2, conflicts, strict); + ft1.checkCompatibility(ft2, conflicts); for (String message : messages) { boolean found = false; for (String conflict : conflicts) { @@ -279,7 +279,7 @@ public void testFreeze() { public void testCheckTypeName() { final MappedFieldType fieldType = createNamedDefaultFieldType(); List conflicts = new ArrayList<>(); - fieldType.checkCompatibility(fieldType, conflicts, random().nextBoolean()); // no exception + fieldType.checkCompatibility(fieldType, conflicts); // no exception assertTrue(conflicts.toString(), conflicts.isEmpty()); MappedFieldType bogus = new TermBasedFieldType() { @@ -291,7 +291,7 @@ public void testCheckTypeName() { public Query existsQuery(QueryShardContext context) { return null; } }; try { - fieldType.checkCompatibility(bogus, conflicts, random().nextBoolean()); + fieldType.checkCompatibility(bogus, conflicts); fail("expected bad types exception"); } catch (IllegalStateException e) { assertTrue(e.getMessage().contains("Type names equal")); @@ -307,7 +307,7 @@ public void testCheckTypeName() { public Query existsQuery(QueryShardContext context) { return null; } }; try { - fieldType.checkCompatibility(other, conflicts, random().nextBoolean()); + fieldType.checkCompatibility(other, conflicts); fail(); } catch (IllegalArgumentException e) { assertTrue(e.getMessage(), e.getMessage().contains("cannot be changed from type")); @@ -318,32 +318,22 @@ public void testCheckTypeName() { public void testCheckCompatibility() { MappedFieldType ft1 = createNamedDefaultFieldType(); MappedFieldType ft2 = createNamedDefaultFieldType(); - assertCompatible("default", ft1, ft2, true); - assertCompatible("default", ft1, ft2, false); - assertCompatible("default", ft2, ft1, true); - assertCompatible("default", ft2, ft1, false); + assertCompatible("default", ft1, ft2); + assertCompatible("default", ft2, ft1); for (Modifier modifier : modifiers) { ft1 = createNamedDefaultFieldType(); ft2 = createNamedDefaultFieldType(); modifier.normalizeOther(ft1); modifier.modify(ft2); - if (modifier.strictOnly) { - String[] conflicts = { - "mapper [foo] is used by multiple types", - "update [" + modifier.property + "]" - }; - assertCompatible(modifier.property, ft1, ft2, false); - assertNotCompatible(modifier.property, ft1, ft2, true, conflicts); - assertCompatible(modifier.property, ft2, ft1, false); // always symmetric when not strict - assertNotCompatible(modifier.property, ft2, ft1, 
true, conflicts); + if (modifier.updateable) { + assertCompatible(modifier.property, ft1, ft2); + assertCompatible(modifier.property, ft2, ft1); // always symmetric when not strict } else { // not compatible whether strict or not String conflict = "different [" + modifier.property + "]"; - assertNotCompatible(modifier.property, ft1, ft2, true, conflict); - assertNotCompatible(modifier.property, ft1, ft2, false, conflict); - assertNotCompatible(modifier.property, ft2, ft1, true, conflict); - assertNotCompatible(modifier.property, ft2, ft1, false, conflict); + assertNotCompatible(modifier.property, ft1, ft2, conflict); + assertNotCompatible(modifier.property, ft2, ft1, conflict); } } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index cd55c1126eb1c..ca9d1728d92bb 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -287,7 +287,7 @@ protected IndexShard newShard(ShardRouting routing, ShardPath shardPath, IndexMe IndexCache indexCache = new IndexCache(indexSettings, new DisabledQueryCache(indexSettings), null); MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), indexSettings.getSettings(), "index"); - mapperService.merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY, true); + mapperService.merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY); SimilarityService similarityService = new SimilarityService(indexSettings, null, Collections.emptyMap()); final IndexEventListener indexEventListener = new IndexEventListener() { }; @@ -579,7 +579,7 @@ protected Consumer getMappingUpdater(IndexShard shard, String type) { protected void updateMappings(IndexShard shard, IndexMetaData indexMetadata) { shard.indexSettings().updateIndexMetaData(indexMetadata); - shard.mapperService().merge(indexMetadata, MapperService.MergeReason.MAPPING_UPDATE, true); + shard.mapperService().merge(indexMetadata, MapperService.MergeReason.MAPPING_UPDATE); } protected Engine.DeleteResult deleteDoc(IndexShard shard, String type, String id) throws IOException { diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java index f833e3c61002c..f8b1572fa09cb 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java @@ -138,17 +138,19 @@ public abstract class AbstractQueryTestCase> public static final String STRING_FIELD_NAME = "mapped_string"; protected static final String STRING_FIELD_NAME_2 = "mapped_string_2"; protected static final String INT_FIELD_NAME = "mapped_int"; + protected static final String INT_RANGE_FIELD_NAME = "mapped_int_range"; protected static final String DOUBLE_FIELD_NAME = "mapped_double"; protected static final String BOOLEAN_FIELD_NAME = "mapped_boolean"; protected static final String DATE_FIELD_NAME = "mapped_date"; + protected static final String DATE_RANGE_FIELD_NAME = "mapped_date_range"; protected static final String OBJECT_FIELD_NAME = "mapped_object"; protected static final String GEO_POINT_FIELD_NAME = "mapped_geo_point"; protected static final String GEO_SHAPE_FIELD_NAME = "mapped_geo_shape"; - protected static final String[] 
MAPPED_FIELD_NAMES = new String[]{STRING_FIELD_NAME, INT_FIELD_NAME, - DOUBLE_FIELD_NAME, BOOLEAN_FIELD_NAME, DATE_FIELD_NAME, OBJECT_FIELD_NAME, GEO_POINT_FIELD_NAME, + protected static final String[] MAPPED_FIELD_NAMES = new String[]{STRING_FIELD_NAME, INT_FIELD_NAME, INT_RANGE_FIELD_NAME, + DOUBLE_FIELD_NAME, BOOLEAN_FIELD_NAME, DATE_FIELD_NAME, DATE_RANGE_FIELD_NAME, OBJECT_FIELD_NAME, GEO_POINT_FIELD_NAME, GEO_SHAPE_FIELD_NAME}; - private static final String[] MAPPED_LEAF_FIELD_NAMES = new String[]{STRING_FIELD_NAME, INT_FIELD_NAME, - DOUBLE_FIELD_NAME, BOOLEAN_FIELD_NAME, DATE_FIELD_NAME, GEO_POINT_FIELD_NAME, }; + private static final String[] MAPPED_LEAF_FIELD_NAMES = new String[]{STRING_FIELD_NAME, INT_FIELD_NAME, INT_RANGE_FIELD_NAME, + DOUBLE_FIELD_NAME, BOOLEAN_FIELD_NAME, DATE_FIELD_NAME, DATE_RANGE_FIELD_NAME, GEO_POINT_FIELD_NAME, }; private static final int NUMBER_OF_TESTQUERIES = 20; protected static Version indexVersionCreated; @@ -1077,18 +1079,20 @@ public void onRemoval(ShardId shardId, Accountable accountable) { STRING_FIELD_NAME, "type=text", STRING_FIELD_NAME_2, "type=keyword", INT_FIELD_NAME, "type=integer", + INT_RANGE_FIELD_NAME, "type=integer_range", DOUBLE_FIELD_NAME, "type=double", BOOLEAN_FIELD_NAME, "type=boolean", DATE_FIELD_NAME, "type=date", + DATE_RANGE_FIELD_NAME, "type=date_range", OBJECT_FIELD_NAME, "type=object", GEO_POINT_FIELD_NAME, "type=geo_point", GEO_SHAPE_FIELD_NAME, "type=geo_shape" - ).string()), MapperService.MergeReason.MAPPING_UPDATE, false); + ).string()), MapperService.MergeReason.MAPPING_UPDATE); // also add mappings for two inner field in the object field mapperService.merge(type, new CompressedXContent("{\"properties\":{\"" + OBJECT_FIELD_NAME + "\":{\"type\":\"object\"," + "\"properties\":{\"" + DATE_FIELD_NAME + "\":{\"type\":\"date\"},\"" + INT_FIELD_NAME + "\":{\"type\":\"integer\"}}}}}"), - MapperService.MergeReason.MAPPING_UPDATE, false); + MapperService.MergeReason.MAPPING_UPDATE); } testCase.initializeAdditionalMappings(mapperService); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index e633f5adb70af..0097621e06292 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -32,7 +32,6 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; @@ -62,6 +61,7 @@ import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.ClearScrollResponse; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.AdminClient; import org.elasticsearch.client.Client; @@ -1275,7 +1275,7 @@ protected final void flushAndRefresh(String... indices) { protected final FlushResponse flush(String... 
indices) { waitForRelocation(); FlushResponse actionGet = client().admin().indices().prepareFlush(indices).execute().actionGet(); - for (ShardOperationFailedException failure : actionGet.getShardFailures()) { + for (DefaultShardOperationFailedException failure : actionGet.getShardFailures()) { assertThat("unexpected flush failure " + failure.reason(), failure.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE)); } return actionGet; diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java index ea846c5dd1841..8f5fe5d5622e7 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java @@ -150,6 +150,7 @@ public abstract class InternalAggregationTestCase extends AbstractWireSerializingTestCase { public static final int DEFAULT_MAX_BUCKETS = 100000; + protected static final double TOLERANCE = 1e-10; private final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry( new SearchModule(Settings.EMPTY, false, emptyList()).getNamedWriteables()); diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java index 4eaaa96df7649..ff31240169ef7 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java +++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java @@ -27,7 +27,6 @@ import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; -import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequestBuilder; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistResponse; @@ -41,6 +40,7 @@ import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; import org.elasticsearch.action.support.master.AcknowledgedResponse; @@ -163,7 +163,7 @@ public static void assertBlocked(ActionRequestBuilder builder) { * */ public static void assertBlocked(BroadcastResponse replicatedBroadcastResponse) { assertThat("all shard requests should have failed", replicatedBroadcastResponse.getFailedShards(), Matchers.equalTo(replicatedBroadcastResponse.getTotalShards())); - for (ShardOperationFailedException exception : replicatedBroadcastResponse.getShardFailures()) { + for (DefaultShardOperationFailedException exception : replicatedBroadcastResponse.getShardFailures()) { ClusterBlockException clusterBlockException = (ClusterBlockException) ExceptionsHelper.unwrap(exception.getCause(), ClusterBlockException.class); assertNotNull("expected the cause of failure to be a ClusterBlockException but got " + exception.getCause().getMessage(), clusterBlockException); assertThat(clusterBlockException.blocks().size(), greaterThan(0)); @@ -203,7 +203,7 @@ public static String 
formatShardStatus(BroadcastResponse response) { msg.append(" Total shards: ").append(response.getTotalShards()) .append(" Successful shards: ").append(response.getSuccessfulShards()) .append(" & ").append(response.getFailedShards()).append(" shard failures:"); - for (ShardOperationFailedException failure : response.getShardFailures()) { + for (DefaultShardOperationFailedException failure : response.getShardFailures()) { msg.append("\n ").append(failure); } return msg.toString(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java index 6e2f43ae75281..01fd3bad0f3e1 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java @@ -165,7 +165,7 @@ public ClientYamlTestResponse callApi(String apiName, Map params Header[] requestHeaders = new Header[headers.size()]; int index = 0; for (Map.Entry header : headers.entrySet()) { - logger.info("Adding header {} with value {}", header.getKey(), header.getValue()); + logger.debug("Adding header {} with value {}", header.getKey(), header.getValue()); requestHeaders[index++] = new BasicHeader(header.getKey(), header.getValue()); } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 20971b3865ea1..ed0431d96785c 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -1927,16 +1927,12 @@ public void testTimeoutPerConnection() throws IOException { public void testHandshakeWithIncompatVersion() { assumeTrue("only tcp transport has a handshake method", serviceA.getOriginalTransport() instanceof TcpTransport); - NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); Version version = Version.fromString("2.0.0"); - try (MockTcpTransport transport = new MockTcpTransport(Settings.EMPTY, threadPool, BigArrays.NON_RECYCLING_INSTANCE, - new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(Collections.emptyList()), version); - MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, transport, version, threadPool, null, - Collections.emptySet())) { + try (MockTransportService service = build(Settings.EMPTY, version, null, true)) { service.start(); service.acceptIncomingRequests(); - DiscoveryNode node = - new DiscoveryNode("TS_TPC", "TS_TPC", transport.boundAddress().publishAddress(), emptyMap(), emptySet(), version0); + TransportAddress address = service.boundAddress().publishAddress(); + DiscoveryNode node = new DiscoveryNode("TS_TPC", "TS_TPC", address, emptyMap(), emptySet(), version0); ConnectionProfile.Builder builder = new ConnectionProfile.Builder(); builder.addConnections(1, TransportRequestOptions.Type.BULK, @@ -1950,17 +1946,12 @@ public void testHandshakeWithIncompatVersion() { public void testHandshakeUpdatesVersion() throws IOException { assumeTrue("only tcp transport has a handshake method", serviceA.getOriginalTransport() instanceof TcpTransport); - NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); Version version = 
VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.CURRENT); - try (MockTcpTransport transport = new MockTcpTransport(Settings.EMPTY, threadPool, BigArrays.NON_RECYCLING_INSTANCE, - new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(Collections.emptyList()), version); - MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, transport, version, threadPool, null, - Collections.emptySet())) { + try (MockTransportService service = build(Settings.EMPTY, version, null, true)) { service.start(); service.acceptIncomingRequests(); - DiscoveryNode node = - new DiscoveryNode("TS_TPC", "TS_TPC", transport.boundAddress().publishAddress(), emptyMap(), emptySet(), - Version.fromString("2.0.0")); + TransportAddress address = service.boundAddress().publishAddress(); + DiscoveryNode node = new DiscoveryNode("TS_TPC", "TS_TPC", address, emptyMap(), emptySet(), Version.fromString("2.0.0")); ConnectionProfile.Builder builder = new ConnectionProfile.Builder(); builder.addConnections(1, TransportRequestOptions.Type.BULK, diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java index a8876453b5b2f..ec262261e54c8 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java @@ -31,14 +31,14 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.nio.AcceptingSelector; import org.elasticsearch.nio.AcceptorEventHandler; -import org.elasticsearch.nio.BytesReadContext; -import org.elasticsearch.nio.BytesWriteContext; +import org.elasticsearch.nio.BytesChannelContext; import org.elasticsearch.nio.ChannelFactory; import org.elasticsearch.nio.InboundChannelBuffer; import org.elasticsearch.nio.NioGroup; import org.elasticsearch.nio.NioServerSocketChannel; import org.elasticsearch.nio.NioSocketChannel; -import org.elasticsearch.nio.ReadContext; +import org.elasticsearch.nio.ServerChannelContext; +import org.elasticsearch.nio.SocketChannelContext; import org.elasticsearch.nio.SocketSelector; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TcpChannel; @@ -162,18 +162,19 @@ public MockSocketChannel createChannel(SocketSelector selector, SocketChannel ch Recycler.V bytes = pageCacheRecycler.bytePage(false); return new InboundChannelBuffer.Page(ByteBuffer.wrap(bytes.v()), bytes::close); }; - ReadContext.ReadConsumer nioReadConsumer = channelBuffer -> + SocketChannelContext.ReadConsumer nioReadConsumer = channelBuffer -> consumeNetworkReads(nioChannel, BytesReference.fromByteBuffers(channelBuffer.sliceBuffersTo(channelBuffer.getIndex()))); - BytesReadContext readContext = new BytesReadContext(nioChannel, nioReadConsumer, new InboundChannelBuffer(pageSupplier)); - BytesWriteContext writeContext = new BytesWriteContext(nioChannel); - nioChannel.setContexts(readContext, writeContext, MockNioTransport.this::exceptionCaught); + BytesChannelContext context = new BytesChannelContext(nioChannel, MockNioTransport.this::exceptionCaught, nioReadConsumer, + new InboundChannelBuffer(pageSupplier)); + nioChannel.setContext(context); return nioChannel; } @Override public MockServerChannel createServerChannel(AcceptingSelector selector, ServerSocketChannel channel) throws IOException { MockServerChannel nioServerChannel = new 
MockServerChannel(profileName, channel, this, selector); - nioServerChannel.setAcceptContext(MockNioTransport.this::acceptChannel); + ServerChannelContext context = new ServerChannelContext(nioServerChannel, MockNioTransport.this::acceptChannel, (c, e) -> {}); + nioServerChannel.setContext(context); return nioServerChannel; } } @@ -188,6 +189,11 @@ private static class MockServerChannel extends NioServerSocketChannel implements this.profile = profile; } + @Override + public void close() { + getSelector().queueChannelClose(this); + } + @Override public String getProfile() { return profile; @@ -224,6 +230,11 @@ private MockSocketChannel(String profile, java.nio.channels.SocketChannel socket this.profile = profile; } + @Override + public void close() { + getContext().closeChannel(); + } + @Override public String getProfile() { return profile; @@ -243,7 +254,7 @@ public void setSoLinger(int value) throws IOException { @Override public void sendMessage(BytesReference reference, ActionListener listener) { - getWriteContext().sendMessage(BytesReference.toByteBuffers(reference), ActionListener.toBiConsumer(listener)); + getContext().sendMessage(BytesReference.toByteBuffers(reference), ActionListener.toBiConsumer(listener)); } } } From a7cbc48b6ef6590c5c8242ba807ef116b61a50a2 Mon Sep 17 00:00:00 2001 From: PnPie Date: Sat, 27 Jan 2018 12:06:42 +0100 Subject: [PATCH 04/12] fix conflicts --- .../elasticsearch/client/IndicesClient.java | 81 +++++++++++++++---- .../org/elasticsearch/client/Request.java | 6 ++ .../elasticsearch/client/IndicesClientIT.java | 39 ++------- modules/lang-painless/spi/build.gradle | 40 +++++++++ 4 files changed, 117 insertions(+), 49 deletions(-) create mode 100644 modules/lang-painless/spi/build.gradle diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java index 5906faf1f8947..77f60cdf5acff 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java @@ -21,6 +21,9 @@ import org.apache.http.Header; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; @@ -35,7 +38,8 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import java.io.IOException; -import java.util.Collections; + +import static java.util.Collections.emptySet; /** * A wrapper for the {@link RestHighLevelClient} that provides methods for accessing the Indices API. @@ -57,7 +61,7 @@ public final class IndicesClient { */ public DeleteIndexResponse delete(DeleteIndexRequest deleteIndexRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(deleteIndexRequest, Request::deleteIndex, DeleteIndexResponse::fromXContent, - Collections.emptySet(), headers); + emptySet(), headers); } /** @@ -68,7 +72,7 @@ public DeleteIndexResponse delete(DeleteIndexRequest deleteIndexRequest, Header. */ public void deleteAsync(DeleteIndexRequest deleteIndexRequest, ActionListener listener, Header... 
headers) { restHighLevelClient.performRequestAsyncAndParseEntity(deleteIndexRequest, Request::deleteIndex, DeleteIndexResponse::fromXContent, - listener, Collections.emptySet(), headers); + listener, emptySet(), headers); } /** @@ -79,7 +83,7 @@ public void deleteAsync(DeleteIndexRequest deleteIndexRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(createIndexRequest, Request::createIndex, CreateIndexResponse::fromXContent, - listener, Collections.emptySet(), headers); + listener, emptySet(), headers); } /** @@ -101,7 +105,7 @@ public void createAsync(CreateIndexRequest createIndexRequest, ActionListener * Put Mapping API on elastic.co */ - public void putMappingAsync(PutMappingRequest putMappingRequest, ActionListener listener, - Header... headers) { + public void putMappingAsync(PutMappingRequest putMappingRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(putMappingRequest, Request::putMapping, PutMappingResponse::fromXContent, - listener, Collections.emptySet(), headers); + listener, emptySet(), headers); + } + + /** + * Updates aliases using the Index Aliases API + *
+ * See + * Index Aliases API on elastic.co + */ + public IndicesAliasesResponse updateAliases(IndicesAliasesRequest indicesAliasesRequest, Header... headers) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(indicesAliasesRequest, Request::updateAliases, + IndicesAliasesResponse::fromXContent, emptySet(), headers); + } + + /** + * Asynchronously updates aliases using the Index Aliases API + *
+ * See + * Index Aliases API on elastic.co + */ + public void updateAliasesAsync(IndicesAliasesRequest indicesAliasesRequest, ActionListener listener, + Header... headers) { + restHighLevelClient.performRequestAsyncAndParseEntity(indicesAliasesRequest, Request::updateAliases, + IndicesAliasesResponse::fromXContent, listener, emptySet(), headers); } /** @@ -124,7 +152,7 @@ public void putMappingAsync(PutMappingRequest putMappingRequest, ActionListener< */ public OpenIndexResponse open(OpenIndexRequest openIndexRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(openIndexRequest, Request::openIndex, OpenIndexResponse::fromXContent, - Collections.emptySet(), headers); + emptySet(), headers); } /** @@ -135,7 +163,7 @@ public OpenIndexResponse open(OpenIndexRequest openIndexRequest, Header... heade */ public void openAsync(OpenIndexRequest openIndexRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(openIndexRequest, Request::openIndex, OpenIndexResponse::fromXContent, - listener, Collections.emptySet(), headers); + listener, emptySet(), headers); } /** @@ -146,7 +174,7 @@ public void openAsync(OpenIndexRequest openIndexRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(closeIndexRequest, Request::closeIndex, CloseIndexResponse::fromXContent, - listener, Collections.emptySet(), headers); + listener, emptySet(), headers); } + + /** + * Checks if one or more aliases exist using the Aliases Exist API + *
+ * See + * Indices Aliases API on elastic.co + */ + public boolean existsAlias(GetAliasesRequest getAliasesRequest, Header... headers) throws IOException { + return restHighLevelClient.performRequest(getAliasesRequest, Request::existsAlias, RestHighLevelClient::convertExistsResponse, + emptySet(), headers); + } + + /** + * Asynchronously checks if one or more aliases exist using the Aliases Exist API + *
+ * See + * Indices Aliases API on elastic.co + */ + public void existsAliasAsync(GetAliasesRequest getAliasesRequest, ActionListener listener, Header... headers) { + restHighLevelClient.performRequestAsync(getAliasesRequest, Request::existsAlias, RestHighLevelClient::convertExistsResponse, + listener, emptySet(), headers); + } + /** * Refresh one or more indices using the Refresh API *
@@ -166,7 +217,7 @@ public void closeAsync(CloseIndexRequest closeIndexRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(refreshRequest, Request::refresh, RefreshResponse::fromXContent, - listener, Collections.emptySet(), headers); + listener, emptySet(), headers); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java index f1c944ab983d8..26431bb6e8fc1 100755 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java @@ -36,6 +36,7 @@ import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.get.GetRequest; @@ -206,6 +207,11 @@ static Request putMapping(PutMappingRequest putMappingRequest) throws IOExceptio return new Request(HttpPut.METHOD_NAME, endpoint, parameters.getParams(), entity); } + static Request refresh(RefreshRequest refreshRequest) { + String endpoint = endpoint(refreshRequest.indices(), Strings.EMPTY_ARRAY, "_refresh"); + return new Request(HttpPost.METHOD_NAME, endpoint, Collections.emptyMap(), null); + } + static Request info() { return new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index 797fae611d7df..00b8e18274268 100755 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -43,6 +43,7 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -56,13 +57,11 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.hamcrest.CoreMatchers.hasItem; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; public class IndicesClientIT extends ESRestHighLevelClientTestCase { - static { - System.setProperty("tests.rest.cluster", "localhost:9200"); - } @SuppressWarnings({ "unchecked", "rawtypes" }) public void testCreateIndex() throws IOException { @@ -127,35 +126,6 @@ public void testCreateIndex() throws IOException { } } - @SuppressWarnings("unchecked") - public void testPutMapping() throws IOException { - { - // Add mappings to index - String indexName = "mapping_index"; - createIndex(indexName); - - PutMappingRequest putMappingRequest = new PutMappingRequest(indexName); - putMappingRequest.type("type_name"); - XContentBuilder mappingBuilder = JsonXContent.contentBuilder(); - 
mappingBuilder.startObject().startObject("properties").startObject("field"); - mappingBuilder.field("type", "text"); - mappingBuilder.endObject().endObject().endObject(); - putMappingRequest.source(mappingBuilder); - - PutMappingResponse putMappingResponse = - execute(putMappingRequest, highLevelClient().indices()::putMapping, highLevelClient().indices()::putMappingAsync); - assertTrue(putMappingResponse.isAcknowledged()); - - Map indexMetaData = getIndexMetadata(indexName); - Map mappingsData = (Map) indexMetaData.get("mappings"); - Map typeData = (Map) mappingsData.get("type_name"); - Map properties = (Map) typeData.get("properties"); - Map field = (Map) properties.get("field"); - - assertEquals("text", field.get("type")); - } - } - @SuppressWarnings({ "unchecked", "rawtypes" }) public void testPutMapping() throws IOException { { @@ -345,7 +315,7 @@ public void testOpenNonExistentIndex() throws IOException { public void testCloseExistingIndex() throws IOException { String index = "index"; createIndex(index); - Response response = client().performRequest("GET", index + "/_search"); + Response response = client().performRequest(HttpGet.METHOD_NAME, index + "/_search"); assertThat(response.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus())); CloseIndexRequest closeIndexRequest = new CloseIndexRequest(index); @@ -353,7 +323,8 @@ public void testCloseExistingIndex() throws IOException { highLevelClient().indices()::closeAsync); assertTrue(closeIndexResponse.isAcknowledged()); - ResponseException exception = expectThrows(ResponseException.class, () -> client().performRequest("GET", index + "/_search")); + ResponseException exception = expectThrows(ResponseException.class, + () -> client().performRequest(HttpGet.METHOD_NAME, index + "/_search")); assertThat(exception.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.BAD_REQUEST.getStatus())); assertThat(exception.getMessage().contains(index), equalTo(true)); } diff --git a/modules/lang-painless/spi/build.gradle b/modules/lang-painless/spi/build.gradle new file mode 100644 index 0000000000000..7e43a242a23a9 --- /dev/null +++ b/modules/lang-painless/spi/build.gradle @@ -0,0 +1,40 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +apply plugin: 'elasticsearch.build' +apply plugin: 'nebula.maven-base-publish' +apply plugin: 'nebula.maven-scm' + +group = 'org.elasticsearch.plugin' +archivesBaseName = 'elasticsearch-scripting-painless-spi' + +publishing { + publications { + nebula { + artifactId = archivesBaseName + } + } +} + +dependencies { + compile "org.elasticsearch:elasticsearch:${version}" +} + +// no tests...yet? 
+test.enabled = false From 18086a3f70bde0dad66631cfa06acdaacc30ae40 Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 9 Feb 2018 20:59:01 +0100 Subject: [PATCH 05/12] shardId can be null, shardId().getId() may throw NPE. --- .../support/DefaultShardOperationFailedException.java | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java b/server/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java index 72734dd7e1fd8..da0489b749a95 100644 --- a/server/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java +++ b/server/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java @@ -28,13 +28,14 @@ import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; import java.io.IOException; import static org.elasticsearch.ExceptionsHelper.detailedMessage; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; public class DefaultShardOperationFailedException implements ShardOperationFailedException { @@ -59,8 +60,10 @@ protected DefaultShardOperationFailedException() { } public DefaultShardOperationFailedException(ElasticsearchException e) { - this.index = e.getIndex() == null ? null : e.getIndex().getName(); - this.shardId = e.getShardId().id(); + Index index = e.getIndex(); + this.index = index == null ? null : index.getName(); + ShardId shardId = e.getShardId(); + this.shardId = shardId == null ? 
-1 : shardId.id(); this.reason = e; this.status = e.status(); } From ed89a7a89638c609f12abb66b89c807df2e5affd Mon Sep 17 00:00:00 2001 From: PnPie Date: Sun, 11 Feb 2018 23:15:00 +0100 Subject: [PATCH 06/12] update --- .../elasticsearch/client/RequestTests.java | 15 +++- .../IndicesClientDocumentationIT.java | 68 +++++++++++++++++ .../high-level/indices/refresh.asciidoc | 75 +++++++++++++++++++ .../indices/refresh/RefreshResponseTests.java | 72 ++++++++---------- 4 files changed, 188 insertions(+), 42 deletions(-) create mode 100644 docs/java-rest/high-level/indices/refresh.asciidoc diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java index 3e4add16707ff..e4b9928349790 100755 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java @@ -39,6 +39,7 @@ import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.action.bulk.BulkRequest; @@ -532,6 +533,18 @@ public void testIndex() throws IOException { } } + public void testRefresh() { + String[] indices = randomIndicesNames(1, 5); + RefreshRequest refreshRequest = new RefreshRequest(indices); + + Request request = Request.refresh(refreshRequest); + StringJoiner endpoint = new StringJoiner("/", "/", "").add(String.join(",", indices)).add("_refresh"); + assertThat(endpoint.toString(), equalTo(request.getEndpoint())); + assertThat(request.getParameters().size(), equalTo(0)); + assertThat(request.getEntity(), nullValue()); + assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); + } + public void testUpdate() throws IOException { XContentType xContentType = randomFrom(XContentType.values()); @@ -1055,7 +1068,7 @@ public void testExistsAliasNoAliasNoIndex() { IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> Request.existsAlias(getAliasesRequest)); assertEquals("existsAlias requires at least an alias or an index", iae.getMessage()); } - + public void testRankEval() throws Exception { RankEvalSpec spec = new RankEvalSpec( Collections.singletonList(new RatedRequest("queryId", Collections.emptyList(), new SearchSourceBuilder())), diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java index f43ec7b3e283a..067b6c7c8bfc2 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java @@ -38,10 +38,13 @@ import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; 
import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeResponse; import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.ESRestHighLevelClientTestCase; import org.elasticsearch.client.RestHighLevelClient; @@ -610,6 +613,71 @@ public void onFailure(Exception e) { } } + public void testRefreshIndex() throws Exception { + RestHighLevelClient client = highLevelClient(); + + { + createIndex("index1", Settings.EMPTY); + createIndex("index2", Settings.EMPTY); + } + + { + // tag::refresh-request + RefreshRequest request = new RefreshRequest("index1"); // <1> + RefreshRequest requestMultiple = new RefreshRequest("index1", "index2"); // <2> + RefreshRequest requestAll = new RefreshRequest(); // <3> + // end::refresh-request + + // tag::refresh-execute + RefreshResponse refreshResponse = client.indices().refresh(request); + // end::refresh-execute + + // tag::refresh-response + int totalShards = refreshResponse.getTotalShards(); // <1> + int successfulShards = refreshResponse.getSuccessfulShards(); // <2> + int failedShards = refreshResponse.getFailedShards(); // <3> + DefaultShardOperationFailedException[] failures = refreshResponse.getShardFailures(); // <4> + // end::refresh-response + + // tag::refresh-execute-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(RefreshResponse refreshResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::refresh-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::refresh-execute-async + client.indices().refreshAsync(request, listener); // <1> + // end::refresh-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + + { + // tag::refresh-notfound + try { + RefreshRequest request = new RefreshRequest("does_not_exist"); + client.indices().refresh(request); + } catch (ElasticsearchException exception) { + if (exception.status() == RestStatus.NOT_FOUND) { + // <1> + } + } + // end::refresh-notfound + } + } + public void testCloseIndex() throws Exception { RestHighLevelClient client = highLevelClient(); diff --git a/docs/java-rest/high-level/indices/refresh.asciidoc b/docs/java-rest/high-level/indices/refresh.asciidoc new file mode 100644 index 0000000000000..e9e6edfbd5bca --- /dev/null +++ b/docs/java-rest/high-level/indices/refresh.asciidoc @@ -0,0 +1,75 @@ +[[java-rest-high-refresh]] +=== Refresh API + +[[java-rest-high-refresh-request]] +==== Refresh Request + +A `RefreshRequest` can be applied to one or more indices, or even on `_all` the indices: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[refresh-request] +-------------------------------------------------- +<1> Refresh one index +<2> Refresh multiple indices +<3> Refresh all the indices + +[[java-rest-high-refresh-sync]] +==== Synchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- 
+include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[refresh-execute] +-------------------------------------------------- + +[[java-rest-high-refresh-async]] +==== Asynchronous Execution + +The asynchronous execution of a refresh request requires both the `RefreshRequest` +instance and an `ActionListener` instance to be passed to the asynchronous +method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[refresh-execute-async] +-------------------------------------------------- +<1> The `RefreshRequest` to execute and the `ActionListener` to use when +the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. + +A typical listener for `RefreshResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[refresh-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of failure. The raised exception is provided as an argument + +[[java-rest-high-refresh-response]] +==== Refresh Response + +The returned `RefreshResponse` allows to retrieve information about the +executed operation as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[refresh-response] +-------------------------------------------------- +<1> total shards number of requested indices +<2> successfully refreshed shards number +<3> failed to refresh shards number +<4> an array of `DefaultShardOperationFailedException` if exist, otherwise it will be an empty array + +If the indices were not found, an `ElasticsearchException` will be thrown: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[refresh-notfound] +-------------------------------------------------- +<1> Do something if the indices to be refreshed were not found \ No newline at end of file diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponseTests.java index a278641405f65..6ce9bd9b092c3 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponseTests.java @@ -26,9 +26,6 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; import org.junit.BeforeClass; @@ -37,60 +34,42 @@ import java.util.List; import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; +import static org.hamcrest.CoreMatchers.both; +import static org.hamcrest.CoreMatchers.containsString; import static 
org.hamcrest.CoreMatchers.equalTo; public class RefreshResponseTests extends ESTestCase { - private static List failures = new ArrayList<>(); - - private static class FakeElasticsearchException extends ElasticsearchException { - - private Index index; - private ShardId shardId; - private RestStatus status; - - public FakeElasticsearchException(String index, int shardId, RestStatus status, String msg) { - super(msg); - this.index = new Index(index, "_na_"); - this.shardId = new ShardId(this.index, shardId); - this.status = status; - } - - @Override - public Index getIndex() { - return this.index; - } - - @Override - public ShardId getShardId() { - return this.shardId; - } - - @Override - public RestStatus status() { - return this.status; - } - } + private static List failures; @BeforeClass public static void prepareException() { failures = new ArrayList<>(); - failures.add(new DefaultShardOperationFailedException( - new FakeElasticsearchException("index1", 1, RestStatus.INTERNAL_SERVER_ERROR, "fake exception 1"))); - failures.add(new DefaultShardOperationFailedException( - new FakeElasticsearchException("index2", 2, RestStatus.GATEWAY_TIMEOUT, "fake exception 2"))); + failures.add(new DefaultShardOperationFailedException(new ElasticsearchException("exception message 1"))); + failures.add(new DefaultShardOperationFailedException(new ElasticsearchException("exception message 2"))); } public void testToXContent() { - RefreshResponse response = new RefreshResponse(10, 10, 0, null); - String output = Strings.toString(response); - assertEquals("{\"_shards\":{\"total\":10,\"successful\":10,\"failed\":0}}", output); + { + RefreshResponse response = new RefreshResponse(10, 10, 0, null); + String output = Strings.toString(response); + assertEquals("{\"_shards\":{\"total\":10,\"successful\":10,\"failed\":0}}", output); + } + { + RefreshResponse responseWithFailures = new RefreshResponse(10, 10, 0, failures); + String output = Strings.toString(responseWithFailures); + assertThat(output, both(containsString("exception message 1")).and(containsString("exception message 2"))); + } } public void testToAndFromXContent() throws IOException { doFromXContentTestWithRandomFields(false); } + public void testFromXContentWithRandomFields() throws IOException { + doFromXContentTestWithRandomFields(true); + } + private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws IOException { RefreshResponse response = new RefreshResponse(10, 10, 0, failures); boolean humanReadable = randomBoolean(); @@ -108,6 +87,17 @@ private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws assertThat(response.getTotalShards(), equalTo(parsedResponse.getTotalShards())); assertThat(response.getSuccessfulShards(), equalTo(parsedResponse.getSuccessfulShards())); assertThat(response.getFailedShards(), equalTo(parsedResponse.getFailedShards())); - assertThat(response.getShardFailures(), equalTo(parsedResponse.getShardFailures())); + compareFailures(response.getShardFailures(), parsedResponse.getShardFailures()); + } + + private static void compareFailures(DefaultShardOperationFailedException[] original, + DefaultShardOperationFailedException[] parsedback) { + assertThat(original.length, equalTo(parsedback.length)); + for (int i = 0; i < original.length; i++) { + assertThat(original[i].index(), equalTo(parsedback[i].index())); + assertThat(original[i].shardId(), equalTo(parsedback[i].shardId())); + assertThat(original[i].status(), equalTo(parsedback[i].status())); + 
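// the cause is parsed back as a generic ElasticsearchException whose message embeds the original reason, + // so containsString (rather than equals) is the right assertion for the round-trip +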
assertThat(parsedback[i].getCause().getMessage(), containsString(original[i].getCause().getMessage())); + } } } From 75812735c86334952baa43dc1805cfc40376bdbc Mon Sep 17 00:00:00 2001 From: javanna Date: Mon, 12 Feb 2018 18:23:38 +0100 Subject: [PATCH 07/12] add refresh page --- docs/java-rest/high-level/supported-apis.asciidoc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 14e4351eb29bd..59de32019f45c 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -52,6 +52,7 @@ Index Management:: * <> * <> * <> +* <> Mapping Management:: * <> @@ -67,6 +68,7 @@ include::indices/open_index.asciidoc[] include::indices/close_index.asciidoc[] include::indices/shrink_index.asciidoc[] include::indices/split_index.asciidoc[] +include::indices/refresh.asciidoc[] include::indices/put_mapping.asciidoc[] include::indices/update_aliases.asciidoc[] include::indices/exists_alias.asciidoc[] From 0168ed50aaf290748df2db518a663adc9f1ae3bb Mon Sep 17 00:00:00 2001 From: Yu Date: Mon, 19 Feb 2018 18:02:22 +0100 Subject: [PATCH 08/12] update --- .../DefaultShardOperationFailedException.java | 6 ++- .../indices/refresh/RefreshResponseTests.java | 42 +++++++++------ ...ultShardOperationFailedExceptionTests.java | 52 +++++++++++++++++++ 3 files changed, 83 insertions(+), 17 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/action/support/DefaultShardOperationFailedExceptionTests.java diff --git a/server/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java b/server/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java index da0489b749a95..42e6b32643478 100644 --- a/server/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java +++ b/server/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java @@ -39,12 +39,16 @@ public class DefaultShardOperationFailedException implements ShardOperationFailedException { + private static final String INDEX = "index"; + private static final String SHARDID = "shard"; private static final String REASON = "reason"; private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "failures", true, arg -> new DefaultShardOperationFailedException((ElasticsearchException) arg[0])); + "failures", true, arg -> new DefaultShardOperationFailedException((String) arg[0], (int) arg[1] ,(Throwable) arg[2])); static { + PARSER.declareString(constructorArg(), new ParseField(INDEX)); + PARSER.declareInt(constructorArg(), new ParseField(SHARDID)); PARSER.declareObject(constructorArg(), (p, c) -> ElasticsearchException.fromXContent(p), new ParseField(REASON)); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponseTests.java index 6ce9bd9b092c3..4e4be3bad6c48 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponseTests.java @@ -26,6 +26,8 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.Index; +import 
org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; import org.junit.BeforeClass; @@ -40,26 +42,14 @@ public class RefreshResponseTests extends ESTestCase { - private static List failures; - @BeforeClass public static void prepareException() { - failures = new ArrayList<>(); - failures.add(new DefaultShardOperationFailedException(new ElasticsearchException("exception message 1"))); - failures.add(new DefaultShardOperationFailedException(new ElasticsearchException("exception message 2"))); } public void testToXContent() { - { - RefreshResponse response = new RefreshResponse(10, 10, 0, null); - String output = Strings.toString(response); - assertEquals("{\"_shards\":{\"total\":10,\"successful\":10,\"failed\":0}}", output); - } - { - RefreshResponse responseWithFailures = new RefreshResponse(10, 10, 0, failures); - String output = Strings.toString(responseWithFailures); - assertThat(output, both(containsString("exception message 1")).and(containsString("exception message 2"))); - } + RefreshResponse response = new RefreshResponse(10, 10, 0, null); + String output = Strings.toString(response); + assertEquals("{\"_shards\":{\"total\":10,\"successful\":10,\"failed\":0}}", output); } public void testToAndFromXContent() throws IOException { @@ -71,7 +61,7 @@ public void testFromXContentWithRandomFields() throws IOException { } private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws IOException { - RefreshResponse response = new RefreshResponse(10, 10, 0, failures); + RefreshResponse response = createTestItem(10); boolean humanReadable = randomBoolean(); XContentType xContentType = randomFrom(XContentType.values()); BytesReference bytesReference = toShuffledXContent(response, xContentType, ToXContent.EMPTY_PARAMS, humanReadable); @@ -100,4 +90,24 @@ private static void compareFailures(DefaultShardOperationFailedException[] origi assertThat(parsedback[i].getCause().getMessage(), containsString(original[i].getCause().getMessage())); } } + + private static RefreshResponse createTestItem(int totalShards) { + List failures = null; + int successfulShards = randomInt(totalShards); + int failedShards = totalShards - successfulShards; + if (failedShards > 0) { + failures = new ArrayList<>(); + for (int i = 0; i < failedShards; i++) { + ElasticsearchException exception = new ElasticsearchException("exception message " + i); + exception.setIndex(new Index("index" + i, "_na_")); + exception.setShard(new ShardId("index" + i, "_na_", i)); + if (randomBoolean()) { + failures.add(new DefaultShardOperationFailedException(exception)); + } else { + failures.add(new DefaultShardOperationFailedException("index" + i, i, new Exception("exception message " + i))); + } + } + } + return new RefreshResponse(totalShards, successfulShards, failedShards, failures); + } } diff --git a/server/src/test/java/org/elasticsearch/action/support/DefaultShardOperationFailedExceptionTests.java b/server/src/test/java/org/elasticsearch/action/support/DefaultShardOperationFailedExceptionTests.java new file mode 100644 index 0000000000000..41b71e4a90eec --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/support/DefaultShardOperationFailedExceptionTests.java @@ -0,0 +1,52 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.support; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.ESTestCase; + +public class DefaultShardOperationFailedExceptionTests extends ESTestCase { + + public void testToString() { + { + DefaultShardOperationFailedException exception = new DefaultShardOperationFailedException( + new ElasticsearchException("foo", new IllegalArgumentException("bar", new RuntimeException("baz")))); + assertEquals("[null][-1] failed, reason [ElasticsearchException[foo]; nested: " + + "IllegalArgumentException[bar]; nested: RuntimeException[baz]; ]", exception.toString()); + } + { + ElasticsearchException elasticsearchException = new ElasticsearchException("foo"); + elasticsearchException.setIndex(new Index("index1", "_na_")); + elasticsearchException.setShard(new ShardId("index1", "_na_", 1)); + DefaultShardOperationFailedException exception = new DefaultShardOperationFailedException(elasticsearchException); + assertEquals("[index1][1] failed, reason [ElasticsearchException[foo]]", exception.toString()); + } + { + DefaultShardOperationFailedException exception = new DefaultShardOperationFailedException("index2", 2, new Exception("foo")); + assertEquals("[index2][2] failed, reason [Exception[foo]]", exception.toString()); + } + } + + public void testToXContent() { + + } +} From fa9cf77c4ee60290c7c2d185563d8b3ebfe55560 Mon Sep 17 00:00:00 2001 From: PnPie Date: Wed, 21 Feb 2018 23:07:35 +0100 Subject: [PATCH 09/12] update --- .../IndicesClientDocumentationIT.java | 5 +- .../high-level/indices/refresh.asciidoc | 19 +++-- .../indices/refresh/RefreshResponseTests.java | 72 +++++++++++++++++-- 3 files changed, 85 insertions(+), 11 deletions(-) diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java index 3c49852bff209..a16d5e2f5fc74 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java @@ -620,7 +620,6 @@ public void testRefreshIndex() throws Exception { { createIndex("index1", Settings.EMPTY); - createIndex("index2", Settings.EMPTY); } { @@ -630,6 +629,10 @@ public void testRefreshIndex() throws Exception { RefreshRequest requestAll = new RefreshRequest(); // <3> // end::refresh-request + // tag::refresh-request-indicesOptions + request.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1> + // end::refresh-request-indicesOptions + // tag::refresh-execute RefreshResponse refreshResponse = client.indices().refresh(request); // end::refresh-execute diff --git a/docs/java-rest/high-level/indices/refresh.asciidoc 
b/docs/java-rest/high-level/indices/refresh.asciidoc index e9e6edfbd5bca..a0e8199be1b8e 100644 --- a/docs/java-rest/high-level/indices/refresh.asciidoc +++ b/docs/java-rest/high-level/indices/refresh.asciidoc @@ -14,6 +14,15 @@ include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[refresh-request] <2> Refresh multiple indices <3> Refresh all the indices +==== Optional arguments + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[refresh-request-indicesOptions] +-------------------------------------------------- +<1> Setting `IndicesOptions` controls how unavailable indices are resolved and +how wildcard expressions are expanded + [[java-rest-high-refresh-sync]] ==== Synchronous Execution @@ -61,12 +70,12 @@ executed operation as follows: -------------------------------------------------- include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[refresh-response] -------------------------------------------------- -<1> total shards number of requested indices -<2> successfully refreshed shards number -<3> failed to refresh shards number -<4> an array of `DefaultShardOperationFailedException` if exist, otherwise it will be an empty array +<1> total number of shards hit by the refresh request +<2> number of shards where the refresh has succeeded +<3> number of shards where the refresh has failed +<4> a list of failures if the operation failed on one or more shards -If the indices were not found, an `ElasticsearchException` will be thrown: +By default, if the indices were not found, an `ElasticsearchException` will be thrown: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponseTests.java index 51a3a2a9ee4c1..dbdb807fb5723 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponseTests.java @@ -28,15 +28,16 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; -import org.junit.BeforeClass; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; -import static org.hamcrest.CoreMatchers.both; +import static org.hamcrest.CoreMatchers.anyOf; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; @@ -56,6 +57,67 @@ public void testFromXContentWithRandomFields() throws IOException { doFromXContentTestWithRandomFields(true); } + public void testFailuresDeduplication() throws IOException { + List failures = new ArrayList<>(); + Index index = new Index("test", "_na_"); + ElasticsearchException exception1 = new ElasticsearchException("foo", new IllegalArgumentException("bar")); + exception1.setIndex(index); + exception1.setShard(new ShardId(index, 0)); + ElasticsearchException exception2 = new ElasticsearchException("foo", new IllegalArgumentException("bar")); + exception2.setIndex(index); + exception2.setShard(new ShardId(index, 1)); + 
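// exception1 and exception2 share the same index and reason and differ only in shard id, so the grouped + // XContent output is expected to collapse them into a single failure entry, while exception3 carries a + // distinct reason and must be reported separately. +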
ElasticsearchException exception3 = new ElasticsearchException("fizz", new IllegalStateException("buzz"));
+        exception3.setIndex(index);
+        exception3.setShard(new ShardId(index, 2));
+        failures.add(new DefaultShardOperationFailedException(exception1));
+        failures.add(new DefaultShardOperationFailedException(exception2));
+        failures.add(new DefaultShardOperationFailedException(exception3));
+
+        RefreshResponse response = new RefreshResponse(10, 7, 3, failures);
+        boolean humanReadable = randomBoolean();
+        XContentType xContentType = randomFrom(XContentType.values());
+        BytesReference bytesReference = toShuffledXContent(response, xContentType, ToXContent.EMPTY_PARAMS, humanReadable);
+        RefreshResponse parsedResponse;
+        try (XContentParser parser = createParser(xContentType.xContent(), bytesReference)) {
+            parsedResponse = RefreshResponse.fromXContent(parser);
+            assertNull(parser.nextToken());
+        }
+
+        assertThat(parsedResponse.getShardFailures().length, equalTo(2));
+        DefaultShardOperationFailedException[] parsedFailures = parsedResponse.getShardFailures();
+        assertThat(parsedFailures[0].index(), equalTo("test"));
+        assertThat(parsedFailures[0].shardId(), anyOf(equalTo(0), equalTo(1)));
+        assertThat(parsedFailures[0].status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR));
+        assertThat(parsedFailures[0].getCause().getMessage(), containsString("foo"));
+        assertThat(parsedFailures[1].index(), equalTo("test"));
+        assertThat(parsedFailures[1].shardId(), equalTo(2));
+        assertThat(parsedFailures[1].status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR));
+        assertThat(parsedFailures[1].getCause().getMessage(), containsString("fizz"));
+
+        ToXContent.Params params = new ToXContent.MapParams(Collections.singletonMap("group_shard_failures", "false"));
+        BytesReference bytesReferenceWithoutDedup = toShuffledXContent(response, xContentType, params, humanReadable);
+        try (XContentParser parser = createParser(xContentType.xContent(), bytesReferenceWithoutDedup)) {
+            parsedResponse = RefreshResponse.fromXContent(parser);
+            assertNull(parser.nextToken());
+        }
+
+        assertThat(parsedResponse.getShardFailures().length, equalTo(3));
+        parsedFailures = parsedResponse.getShardFailures();
+        for (int i = 0; i < 3; i++) {
+            if (i < 2) {
+                assertThat(parsedFailures[i].index(), equalTo("test"));
+                assertThat(parsedFailures[i].shardId(), equalTo(i));
+                assertThat(parsedFailures[i].status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR));
+                assertThat(parsedFailures[i].getCause().getMessage(), containsString("foo"));
+            } else {
+                assertThat(parsedFailures[i].index(), equalTo("test"));
+                assertThat(parsedFailures[i].shardId(), equalTo(i));
+                assertThat(parsedFailures[i].status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR));
+                assertThat(parsedFailures[i].getCause().getMessage(), containsString("fizz"));
+            }
+        }
+    }
+
     private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws IOException {
         RefreshResponse response = createTestItem(10);
         boolean humanReadable = randomBoolean();
@@ -73,11 +135,11 @@ private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws
         assertThat(response.getTotalShards(), equalTo(parsedResponse.getTotalShards()));
         assertThat(response.getSuccessfulShards(), equalTo(parsedResponse.getSuccessfulShards()));
         assertThat(response.getFailedShards(), equalTo(parsedResponse.getFailedShards()));
-        compareFailures(response.getShardFailures(), parsedResponse.getShardFailures());
+        assertFailureEquals(response.getShardFailures(), parsedResponse.getShardFailures());
     }
 
-    private static void compareFailures(DefaultShardOperationFailedException[] original,
-                                        DefaultShardOperationFailedException[] parsedback) {
+    private static void assertFailureEquals(DefaultShardOperationFailedException[] original,
+                                            DefaultShardOperationFailedException[] parsedback) {
         assertThat(original.length, equalTo(parsedback.length));
         for (int i = 0; i < original.length; i++) {
             assertThat(original[i].index(), equalTo(parsedback[i].index()));

From 9c03b748c1c31778697f5691bc048adc564b5df6 Mon Sep 17 00:00:00 2001
From: Yu
Date: Sat, 24 Feb 2018 11:33:22 +0100
Subject: [PATCH 10/12] small changes

---
 .../elasticsearch/client/IndicesClientIT.java | 15 ++++------
 .../DefaultShardOperationFailedException.java |  4 +--
 .../support/broadcast/BroadcastResponse.java  |  1 -
 ...ultShardOperationFailedExceptionTests.java | 28 +++++++------------
 4 files changed, 17 insertions(+), 31 deletions(-)

diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java
index 6f8abd1f4bfcb..7799171b45b56 100755
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java
@@ -48,8 +48,8 @@
 import org.elasticsearch.action.admin.indices.shrink.ResizeType;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.support.IndicesOptions;
-import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.action.support.WriteRequest;
+import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.common.unit.ByteSizeValue;
@@ -65,9 +65,7 @@
 import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
 import static org.hamcrest.CoreMatchers.hasItem;
 import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.not;
-import static org.hamcrest.Matchers.both;
 
 public class IndicesClientIT extends ESRestHighLevelClientTestCase {
 
@@ -388,20 +386,17 @@ public void testCloseNonExistentIndex() throws IOException {
 
     public void testRefresh() throws IOException {
         {
-            final int numberOfShards = randomIntBetween(1, 5);
-            final int numberOfReplicas = randomIntBetween(1, 3);
             String index = "index";
             Settings settings = Settings.builder()
-                .put("number_of_shards", numberOfShards)
-                .put("number_of_replicas", numberOfReplicas)
+                .put("number_of_shards", 1)
+                .put("number_of_replicas", 0)
                 .build();
             createIndex(index, settings);
             RefreshRequest refreshRequest = new RefreshRequest(index);
             RefreshResponse refreshResponse =
                 execute(refreshRequest, highLevelClient().indices()::refresh, highLevelClient().indices()::refreshAsync);
-            assertThat(refreshResponse.getTotalShards(), equalTo(numberOfShards * (numberOfReplicas + 1)));
-            assertThat(refreshResponse.getSuccessfulShards(), greaterThan(0));
-            assertThat(refreshResponse.getSuccessfulShards() % numberOfShards, equalTo(0));
+            assertThat(refreshResponse.getTotalShards(), equalTo(1));
+            assertThat(refreshResponse.getSuccessfulShards(), equalTo(1));
             assertThat(refreshResponse.getFailedShards(), equalTo(0));
             assertThat(refreshResponse.getShardFailures(), equalTo(BroadcastResponse.EMPTY));
         }
diff --git a/server/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java b/server/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java
index 42e6b32643478..8a4a787fbe5f2 100644
--- a/server/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java
+++ b/server/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java
@@ -40,7 +40,7 @@
 public class DefaultShardOperationFailedException implements ShardOperationFailedException {
 
     private static final String INDEX = "index";
-    private static final String SHARDID = "shard";
+    private static final String SHARD_ID = "shard";
     private static final String REASON = "reason";
 
     private static final ConstructingObjectParser<DefaultShardOperationFailedException, Void> PARSER = new ConstructingObjectParser<>(
@@ -48,7 +48,7 @@ public class DefaultShardOperationFailedException implements ShardOperationFailedException {
 
     static {
         PARSER.declareString(constructorArg(), new ParseField(INDEX));
-        PARSER.declareInt(constructorArg(), new ParseField(SHARDID));
+        PARSER.declareInt(constructorArg(), new ParseField(SHARD_ID));
         PARSER.declareObject(constructorArg(), (p, c) -> ElasticsearchException.fromXContent(p), new ParseField(REASON));
     }
 
diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java
index d65bc79aec680..cb2fc23febbf2 100644
--- a/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java
@@ -149,7 +149,6 @@ public void writeTo(StreamOutput out) throws IOException {
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        // TODO: move BroadcastResponse building codes from RestActions to itself here
         RestActions.buildBroadcastShardsHeader(builder, params, totalShards, successfulShards, -1, failedShards, shardFailures);
         return builder;
     }
diff --git a/server/src/test/java/org/elasticsearch/action/support/DefaultShardOperationFailedExceptionTests.java b/server/src/test/java/org/elasticsearch/action/support/DefaultShardOperationFailedExceptionTests.java
index ea1e8c8412863..28099506e08e6 100644
--- a/server/src/test/java/org/elasticsearch/action/support/DefaultShardOperationFailedExceptionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/support/DefaultShardOperationFailedExceptionTests.java
@@ -21,10 +21,9 @@
 
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
-import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.xcontent.XContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.Index;
@@ -59,27 +58,27 @@ public void testToString() {
 
     public void testToXContent() throws IOException {
         {
             DefaultShardOperationFailedException exception = new DefaultShardOperationFailedException(new ElasticsearchException("foo"));
-            assertExceptionAsJson(exception, "{\"shard\":-1,\"index\":null,\"status\":\"INTERNAL_SERVER_ERROR\"," +
-                "\"reason\":{\"type\":\"exception\",\"reason\":\"foo\"}}");
+            assertEquals("{\"shard\":-1,\"index\":null,\"status\":\"INTERNAL_SERVER_ERROR\"," +
+                "\"reason\":{\"type\":\"exception\",\"reason\":\"foo\"}}", Strings.toString(exception));
         }
         {
             DefaultShardOperationFailedException exception = new DefaultShardOperationFailedException(
                 new ElasticsearchException("foo", new IllegalArgumentException("bar")));
-            assertExceptionAsJson(exception, "{\"shard\":-1,\"index\":null,\"status\":\"INTERNAL_SERVER_ERROR\"," +
-                "\"reason\":{\"type\":\"exception\"," +
-                "\"reason\":\"foo\",\"caused_by\":{\"type\":\"illegal_argument_exception\",\"reason\":\"bar\"}}}");
+            assertEquals("{\"shard\":-1,\"index\":null,\"status\":\"INTERNAL_SERVER_ERROR\",\"reason\":{\"type\":\"exception\"," +
+                "\"reason\":\"foo\",\"caused_by\":{\"type\":\"illegal_argument_exception\",\"reason\":\"bar\"}}}",
+                Strings.toString(exception));
         }
         {
             DefaultShardOperationFailedException exception = new DefaultShardOperationFailedException(
                 new BroadcastShardOperationFailedException(new ShardId("test", "_uuid", 2), "foo", new IllegalStateException("bar")));
-            assertExceptionAsJson(exception, "{\"shard\":2,\"index\":\"test\",\"status\":\"INTERNAL_SERVER_ERROR\"," +
-                "\"reason\":{\"type\":\"illegal_state_exception\",\"reason\":\"bar\"}}");
+            assertEquals("{\"shard\":2,\"index\":\"test\",\"status\":\"INTERNAL_SERVER_ERROR\"," +
+                "\"reason\":{\"type\":\"illegal_state_exception\",\"reason\":\"bar\"}}", Strings.toString(exception));
         }
         {
             DefaultShardOperationFailedException exception = new DefaultShardOperationFailedException("test", 1, new IllegalArgumentException("foo"));
-            assertExceptionAsJson(exception, "{\"shard\":1,\"index\":\"test\",\"status\":\"BAD_REQUEST\"," +
-                "\"reason\":{\"type\":\"illegal_argument_exception\",\"reason\":\"foo\"}}");
+            assertEquals("{\"shard\":1,\"index\":\"test\",\"status\":\"BAD_REQUEST\"," +
+                "\"reason\":{\"type\":\"illegal_argument_exception\",\"reason\":\"foo\"}}", Strings.toString(exception));
         }
     }
 
@@ -110,11 +109,4 @@ public void testFromXContent() throws IOException {
         assertEquals(parsed.status(), RestStatus.INTERNAL_SERVER_ERROR);
         assertEquals(parsed.getCause().getMessage(), "Elasticsearch exception [type=exception, reason=foo]");
     }
-
-    private static void assertExceptionAsJson(DefaultShardOperationFailedException exception, String expectedJson) throws IOException {
-        BytesReference exceptionBytes = XContentHelper.toXContent(
-            (builder, params) -> exception.toXContent(builder, params), XContentType.JSON, randomBoolean());
-        String exceptionJson = XContentHelper.convertToJson(exceptionBytes, false, XContentType.JSON);
-        assertEquals(exceptionJson, expectedJson);
-    }
 }

From 9fc4529006b41930b397700e3dd6f23866e8c41f Mon Sep 17 00:00:00 2001
From: PnPie
Date: Tue, 27 Feb 2018 22:04:44 +0100
Subject: [PATCH 11/12] reformat code

---
 .../action/support/broadcast/BroadcastResponse.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java
index cb2fc23febbf2..75f205904a0c1 100644
--- a/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java
@@ -62,7 +62,8 @@ protected static <T extends BroadcastResponse> void declareBroadcastFields(ConstructingObjectParser<T, Void> PARSER) {
         shardsParser.declareInt(constructorArg(), TOTAL_FIELD);
         shardsParser.declareInt(constructorArg(), SUCCESSFUL_FIELD);
         shardsParser.declareInt(constructorArg(), FAILED_FIELD);
-        shardsParser.declareObjectArray(optionalConstructorArg(), (p, c) -> DefaultShardOperationFailedException.fromXContent(p), FAILURES_FIELD);
+        shardsParser.declareObjectArray(optionalConstructorArg(),
+            (p, c) -> DefaultShardOperationFailedException.fromXContent(p), FAILURES_FIELD);
         PARSER.declareObject(constructorArg(), shardsParser, _SHARDS_FIELD);
     }

From 364de2648b881d09ca3423a9583ed120251473f3 Mon Sep 17 00:00:00 2001
From: javanna
Date: Wed, 28 Feb 2018 11:48:15 +0100
Subject: [PATCH 12/12] address last tiny bits

---
 docs/java-rest/high-level/indices/refresh.asciidoc  | 8 ++++----
 .../action/support/broadcast/BroadcastResponse.java | 3 +--
 2 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/docs/java-rest/high-level/indices/refresh.asciidoc b/docs/java-rest/high-level/indices/refresh.asciidoc
index a0e8199be1b8e..f61c1c37d4ee1 100644
--- a/docs/java-rest/high-level/indices/refresh.asciidoc
+++ b/docs/java-rest/high-level/indices/refresh.asciidoc
@@ -70,10 +70,10 @@ executed operation as follows:
 --------------------------------------------------
 include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[refresh-response]
 --------------------------------------------------
-<1> total number of shards hit by the refresh request
-<2> number of shards where the refresh has succeeded
-<3> number of shards where the refresh has failed
-<4> a list of failures if the operation failed on one or more shards
+<1> Total number of shards hit by the refresh request
+<2> Number of shards where the refresh has succeeded
+<3> Number of shards where the refresh has failed
+<4> A list of failures if the operation failed on one or more shards
 
 By default, if the indices were not found, an `ElasticsearchException` will be thrown:
 
diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java
index 75f205904a0c1..ce812644faea6 100644
--- a/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java
@@ -26,7 +26,6 @@
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.ConstructingObjectParser;
 import org.elasticsearch.common.xcontent.ToXContentFragment;
-import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.rest.action.RestActions;
@@ -150,7 +149,7 @@ public void writeTo(StreamOutput out) throws IOException {
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        RestActions.buildBroadcastShardsHeader(builder, params, totalShards, successfulShards, -1, failedShards, shardFailures);
+        RestActions.buildBroadcastShardsHeader(builder, params, this);
        return builder;
     }
 }
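
Applied in order, the series leaves the high-level client with a complete
Refresh API: a RefreshRequest is sent to the _refresh endpoint and the
broadcast shards header of the reply is parsed back into a RefreshResponse.
The sketch below shows how the resulting API is meant to be called. It is not
part of the patches: the class name, the localhost:9200 bootstrap and the
index names are illustrative assumptions, while the refresh/refreshAsync
methods and the RefreshResponse getters are the ones exercised by testRefresh
above.

--------------------------------------------------
import org.apache.http.HttpHost;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;

public class RefreshApiSketch {

    public static void main(String[] args) throws Exception {
        // Assumed bootstrap: a single node listening on localhost:9200.
        RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")));
        try {
            // Synchronous refresh of one or more (assumed) indices.
            RefreshRequest request = new RefreshRequest("index1", "index2");
            RefreshResponse response = client.indices().refresh(request);

            // The broadcast shards header that RefreshResponse.fromXContent parses.
            int totalShards = response.getTotalShards();
            int successfulShards = response.getSuccessfulShards();
            int failedShards = response.getFailedShards();
            System.out.println(successfulShards + "/" + totalShards
                    + " shards refreshed, " + failedShards + " failed");
            for (DefaultShardOperationFailedException failure : response.getShardFailures()) {
                System.out.println(failure.index() + "[" + failure.shardId() + "]: " + failure.reason());
            }

            // Asynchronous variant; a real program would wait for the listener
            // to fire before closing the client.
            client.indices().refreshAsync(request, new ActionListener<RefreshResponse>() {
                @Override
                public void onResponse(RefreshResponse refreshResponse) {
                    // Inspect the same shards header as above.
                }

                @Override
                public void onFailure(Exception e) {
                    // E.g. an ElasticsearchException if the indices do not exist.
                }
            });
        } finally {
            client.close();
        }
    }
}
--------------------------------------------------

The four getters read the same _shards section that the refresh.asciidoc
callouts <1> to <4> document, which is why testRefresh can assert on them
against the known shard and replica counts of the index it creates.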