diff --git a/client/rest-high-level/build.gradle b/client/rest-high-level/build.gradle index c273e76a92aed..222de9608aeb9 100644 --- a/client/rest-high-level/build.gradle +++ b/client/rest-high-level/build.gradle @@ -40,6 +40,7 @@ dependencies { compile "org.elasticsearch.plugin:parent-join-client:${version}" compile "org.elasticsearch.plugin:aggs-matrix-stats-client:${version}" compile "org.elasticsearch.plugin:rank-eval-client:${version}" + compile "org.elasticsearch.plugin:lang-mustache-client:${version}" testCompile "org.elasticsearch.client:test:${version}" testCompile "org.elasticsearch.test:framework:${version}" diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java index f3c84db79d65f..e78e4686d6991 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java @@ -21,6 +21,8 @@ import org.apache.http.Header; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; @@ -63,4 +65,26 @@ public void putSettingsAsync(ClusterUpdateSettingsRequest clusterUpdateSettingsR restHighLevelClient.performRequestAsyncAndParseEntity(clusterUpdateSettingsRequest, RequestConverters::clusterPutSettings, ClusterUpdateSettingsResponse::fromXContent, listener, emptySet(), headers); } + + /** + * Get current tasks using the Task Management API + * <p>
+ * See + * <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html"> Task Management API on elastic.co</a> + */ + public ListTasksResponse listTasks(ListTasksRequest request, Header... headers) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, RequestConverters::listTasks, ListTasksResponse::fromXContent, + emptySet(), headers); + } + + /** + * Asynchronously get current tasks using the Task Management API + * <p>
+ * See + * <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html"> Task Management API on elastic.co</a> + */ + public void listTasksAsync(ListTasksRequest request, ActionListener<ListTasksResponse> listener, Header... headers) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::listTasks, ListTasksResponse::fromXContent, + listener, emptySet(), headers); + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index 2e7b4ba74cc39..a5a6b9f7bd271 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -29,7 +29,9 @@ import org.apache.http.entity.ContentType; import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; @@ -44,8 +46,8 @@ import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; -import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; @@ -80,7 +82,9 @@ import org.elasticsearch.index.VersionType; import org.elasticsearch.index.rankeval.RankEvalRequest; import org.elasticsearch.rest.action.search.RestSearchAction; +import org.elasticsearch.script.mustache.SearchTemplateRequest; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; +import org.elasticsearch.tasks.TaskId; import java.io.ByteArrayOutputStream; import java.io.IOException; @@ -458,6 +462,15 @@ static Request search(SearchRequest searchRequest) throws IOException { Request request = new Request(HttpPost.METHOD_NAME, endpoint(searchRequest.indices(), searchRequest.types(), "_search")); Params params = new Params(request); + addSearchRequestParams(params, searchRequest); + + if (searchRequest.source() != null) { + request.setEntity(createEntity(searchRequest.source(), REQUEST_BODY_CONTENT_TYPE)); + } + return request; + } + + private static void addSearchRequestParams(Params params, SearchRequest searchRequest) { params.putParam(RestSearchAction.TYPED_KEYS_PARAM, "true"); params.withRouting(searchRequest.routing()); params.withPreference(searchRequest.preference()); @@ -473,11 +486,6 @@ static Request search(SearchRequest searchRequest) throws IOException { if (searchRequest.scroll() != null) { params.putParam("scroll", searchRequest.scroll().keepAlive()); } - - if (searchRequest.source() != null) { - request.setEntity(createEntity(searchRequest.source(), REQUEST_BODY_CONTENT_TYPE)); - } - return request; }
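// For orientation, a minimal sketch of how a caller might use the listTasks
// methods added to ClusterClient above. The localhost:9200 endpoint and the
// "cluster:*" action filter are illustrative assumptions, not part of this change.
import org.apache.http.HttpHost;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.tasks.TaskInfo;

public class ListTasksExample {
    public static void main(String[] args) throws Exception {
        try (RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")))) {
            ListTasksRequest request = new ListTasksRequest();
            request.setActions("cluster:*"); // only list cluster-level actions
            request.setDetailed(true);       // include the detailed task description
            ListTasksResponse response = client.cluster().listTasks(request);
            for (TaskInfo task : response.getTasks()) {
                System.out.println(task.getTaskId() + " " + task.getAction());
            }
        }
    }
}

static Request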
searchScroll(SearchScrollRequest searchScrollRequest) throws IOException { @@ -507,6 +515,24 @@ static Request multiSearch(MultiSearchRequest multiSearchRequest) throws IOExcep return request; } + static Request searchTemplate(SearchTemplateRequest searchTemplateRequest) throws IOException { + Request request; + + if (searchTemplateRequest.isSimulate()) { + request = new Request(HttpGet.METHOD_NAME, "_render/template"); + } else { + SearchRequest searchRequest = searchTemplateRequest.getRequest(); + String endpoint = endpoint(searchRequest.indices(), searchRequest.types(), "_search/template"); + request = new Request(HttpGet.METHOD_NAME, endpoint); + + Params params = new Params(request); + addSearchRequestParams(params, searchRequest); + } + + request.setEntity(createEntity(searchTemplateRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + static Request existsAlias(GetAliasesRequest getAliasesRequest) { if ((getAliasesRequest.indices() == null || getAliasesRequest.indices().length == 0) && (getAliasesRequest.aliases() == null || getAliasesRequest.aliases().length == 0)) { @@ -582,6 +608,22 @@ static Request clusterPutSettings(ClusterUpdateSettingsRequest clusterUpdateSett return request; } + static Request listTasks(ListTasksRequest listTaskRequest) { + if (listTaskRequest.getTaskId() != null && listTaskRequest.getTaskId().isSet()) { + throw new IllegalArgumentException("TaskId cannot be used for list tasks request"); + } + Request request = new Request(HttpGet.METHOD_NAME, "/_tasks"); + Params params = new Params(request); + params.withTimeout(listTaskRequest.getTimeout()) + .withDetailed(listTaskRequest.getDetailed()) + .withWaitForCompletion(listTaskRequest.getWaitForCompletion()) + .withParentTaskId(listTaskRequest.getParentTaskId()) + .withNodes(listTaskRequest.getNodes()) + .withActions(listTaskRequest.getActions()) + .putParam("group_by", "none"); + return request; + } + static Request rollover(RolloverRequest rolloverRequest) throws IOException { String endpoint = new EndpointBuilder().addPathPart(rolloverRequest.getAlias()).addPathPartAsIs("_rollover") .addPathPart(rolloverRequest.getNewIndexName()).build(); @@ -656,6 +698,19 @@ static Request getRepositories(GetRepositoriesRequest getRepositoriesRequest) { return request; } + static Request createRepository(PutRepositoryRequest putRepositoryRequest) throws IOException { + String endpoint = new EndpointBuilder().addPathPart("_snapshot").addPathPart(putRepositoryRequest.name()).build(); + Request request = new Request(HttpPut.METHOD_NAME, endpoint); + + Params parameters = new Params(request); + parameters.withMasterTimeout(putRepositoryRequest.masterNodeTimeout()); + parameters.withTimeout(putRepositoryRequest.timeout()); + parameters.withVerify(putRepositoryRequest.verify()); + + request.setEntity(createEntity(putRepositoryRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + static Request putTemplate(PutIndexTemplateRequest putIndexTemplateRequest) throws IOException { String endpoint = new EndpointBuilder().addPathPartAsIs("_template").addPathPart(putIndexTemplateRequest.name()).build(); Request request = new Request(HttpPut.METHOD_NAME, endpoint); @@ -894,6 +949,48 @@ Params withPreserveExisting(boolean preserveExisting) { } return this; } + + Params withDetailed(boolean detailed) { + if (detailed) { + return putParam("detailed", Boolean.TRUE.toString()); + } + return this; + } + + Params withWaitForCompletion(boolean waitForCompletion) { + if (waitForCompletion) { + return putParam("wait_for_completion", 
Boolean.TRUE.toString()); + } + return this; + } + + Params withNodes(String[] nodes) { + if (nodes != null && nodes.length > 0) { + return putParam("nodes", String.join(",", nodes)); + } + return this; + } + + Params withActions(String[] actions) { + if (actions != null && actions.length > 0) { + return putParam("actions", String.join(",", actions)); + } + return this; + } + + Params withParentTaskId(TaskId parentTaskId) { + if (parentTaskId != null && parentTaskId.isSet()) { + return putParam("parent_task_id", parentTaskId.toString()); + } + return this; + } + + Params withVerify(boolean verify) { + if (verify) { + return putParam("verify", Boolean.TRUE.toString()); + } + return this; + } } /** diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index 1985d6bd06dd4..5dbf2709d9988 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -64,6 +64,8 @@ import org.elasticsearch.plugins.spi.NamedXContentProvider; import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.script.mustache.SearchTemplateRequest; +import org.elasticsearch.script.mustache.SearchTemplateResponse; import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.bucket.adjacency.AdjacencyMatrixAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.adjacency.ParsedAdjacencyMatrix; @@ -501,6 +503,32 @@ public final void clearScrollAsync(ClearScrollRequest clearScrollRequest, listener, emptySet(), headers); } + /** + * Executes a request using the Search Template API. + * + * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html">Search Template API + * on elastic.co</a>. + */ + public final SearchTemplateResponse searchTemplate(SearchTemplateRequest searchTemplateRequest, + Header... headers) throws IOException { + return performRequestAndParseEntity(searchTemplateRequest, RequestConverters::searchTemplate, + SearchTemplateResponse::fromXContent, emptySet(), headers); + } + + /** + * Asynchronously executes a request using the Search Template API + * + * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html">Search Template API + * on elastic.co</a>. + */ + public final void searchTemplateAsync(SearchTemplateRequest searchTemplateRequest, + ActionListener<SearchTemplateResponse> listener, + Header... headers) { + performRequestAsyncAndParseEntity(searchTemplateRequest, RequestConverters::searchTemplate, + SearchTemplateResponse::fromXContent, listener, emptySet(), headers); + } + +
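// A minimal sketch of the new search-template round trip, mirroring the SearchIT
// test later in this change. The "posts" index and the field/value parameters
// are illustrative assumptions.
import java.util.HashMap;
import java.util.Map;
import org.apache.http.HttpHost;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.script.mustache.SearchTemplateRequest;
import org.elasticsearch.script.mustache.SearchTemplateResponse;

public class SearchTemplateExample {
    public static void main(String[] args) throws Exception {
        try (RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")))) {
            SearchTemplateRequest request = new SearchTemplateRequest();
            request.setRequest(new SearchRequest("posts"));
            request.setScriptType(ScriptType.INLINE);
            // The inline mustache template is expanded with the parameters below.
            request.setScript("{\"query\": { \"match\" : { \"{{field}}\" : \"{{value}}\" }}}");
            Map<String, Object> params = new HashMap<>();
            params.put("field", "title");
            params.put("value", "elasticsearch");
            request.setScriptParams(params);
            SearchTemplateResponse response = client.searchTemplate(request);
            System.out.println("hits: " + response.getResponse().getHits().totalHits);
        }
    }
}

/** * Executes a request using the Ranking Evaluation API.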
* diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java index e526fbe7164f9..aec94586bee30 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java @@ -23,8 +23,8 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; -import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; -import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; import java.io.IOException; @@ -67,4 +67,27 @@ public void getRepositoriesAsync(GetRepositoriesRequest getRepositoriesRequest, restHighLevelClient.performRequestAsyncAndParseEntity(getRepositoriesRequest, RequestConverters::getRepositories, GetRepositoriesResponse::fromXContent, listener, emptySet(), headers); } + + /** + * Creates a snapshot repository. + * <p>
+ * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html"> Snapshot and Restore + * API on elastic.co</a> + */ + public PutRepositoryResponse createRepository(PutRepositoryRequest putRepositoryRequest, Header... headers) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(putRepositoryRequest, RequestConverters::createRepository, + PutRepositoryResponse::fromXContent, emptySet(), headers); + } + + /** + * Asynchronously creates a snapshot repository. + * <p>
+ * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html"> Snapshot and Restore + * API on elastic.co</a> + */ + public void createRepositoryAsync(PutRepositoryRequest putRepositoryRequest, + ActionListener<PutRepositoryResponse> listener, Header... headers) { + restHighLevelClient.performRequestAsyncAndParseEntity(putRepositoryRequest, RequestConverters::createRepository, + PutRepositoryResponse::fromXContent, listener, emptySet(), headers); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java index 9314bb2e36cea..fa3086442f528 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java @@ -20,6 +20,9 @@ package org.elasticsearch.client; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; +import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; @@ -29,13 +32,16 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.tasks.TaskInfo; import java.io.IOException; import java.util.HashMap; import java.util.Map; +import static java.util.Collections.emptyList; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -105,4 +111,29 @@ public void testClusterUpdateSettingNonExistent() { assertThat(exception.getMessage(), equalTo( "Elasticsearch exception [type=illegal_argument_exception, reason=transient setting [" + setting + "], not recognized]")); } + + public void testListTasks() throws IOException { + ListTasksRequest request = new ListTasksRequest(); + ListTasksResponse response = execute(request, highLevelClient().cluster()::listTasks, highLevelClient().cluster()::listTasksAsync); + + assertThat(response, notNullValue()); + assertThat(response.getNodeFailures(), equalTo(emptyList())); + assertThat(response.getTaskFailures(), equalTo(emptyList())); + // It's possible that there are other tasks besides 'cluster:monitor/tasks/lists[n]' and 'cluster:monitor/tasks/lists' + assertThat(response.getTasks().size(), greaterThanOrEqualTo(2)); + boolean listTasksFound = false; + for (TaskGroup taskGroup : response.getTaskGroups()) { + TaskInfo parent = taskGroup.getTaskInfo(); + if ("cluster:monitor/tasks/lists".equals(parent.getAction())) { + assertThat(taskGroup.getChildTasks().size(), equalTo(1)); + TaskGroup childGroup = taskGroup.getChildTasks().iterator().next(); + assertThat(childGroup.getChildTasks().isEmpty(), equalTo(true)); + TaskInfo child = childGroup.getTaskInfo(); + assertThat(child.getAction(), equalTo("cluster:monitor/tasks/lists[n]")); + assertThat(child.getParentTaskId(), equalTo(parent.getTaskId())); + listTasksFound = true; + } + } + assertTrue("List tasks were not found", listTasksFound); + } }
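// A minimal sketch of registering a shared file system ("fs") repository through
// the new SnapshotClient method, mirroring SnapshotIT below. The repository name
// and location are illustrative assumptions; the location must be listed in
// path.repo on every node.
import org.apache.http.HttpHost;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.repositories.fs.FsRepository;

public class CreateRepositoryExample {
    public static void main(String[] args) throws Exception {
        try (RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")))) {
            PutRepositoryRequest request = new PutRepositoryRequest("my_backup");
            request.type(FsRepository.TYPE); // "fs" repository type
            request.settings(Settings.builder()
                    .put(FsRepository.LOCATION_SETTING.getKey(), "/mnt/backups")
                    .put(FsRepository.COMPRESS_SETTING.getKey(), true)
                    .build());
            PutRepositoryResponse response = client.snapshot().createRepository(request);
            System.out.println("acknowledged: " + response.isAcknowledged());
        }
    }
}

diff --git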
a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index 2d4ef8b6413d9..4a0276e74d228 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -29,7 +29,9 @@ import org.apache.http.util.EntityUtils; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; @@ -77,9 +79,11 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -94,7 +98,10 @@ import org.elasticsearch.index.rankeval.RankEvalSpec; import org.elasticsearch.index.rankeval.RatedRequest; import org.elasticsearch.index.rankeval.RestRankEvalAction; +import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.rest.action.search.RestSearchAction; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.script.mustache.SearchTemplateRequest; import org.elasticsearch.search.Scroll; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValueType; @@ -105,11 +112,13 @@ import org.elasticsearch.search.rescore.QueryRescorerBuilder; import org.elasticsearch.search.suggest.SuggestBuilder; import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder; +import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.RandomObjects; import java.io.IOException; import java.io.InputStream; +import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -135,6 +144,7 @@ import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; public class RequestConvertersTests extends ESTestCase { @@ -181,8 +191,7 @@ public void testMultiGet() throws IOException { int numberOfRequests = randomIntBetween(0, 32); for (int i = 0; i < numberOfRequests; i++) { - MultiGetRequest.Item item = - new MultiGetRequest.Item(randomAlphaOfLength(4), randomAlphaOfLength(4), randomAlphaOfLength(4)); + MultiGetRequest.Item item = new MultiGetRequest.Item(randomAlphaOfLength(4), randomAlphaOfLength(4), randomAlphaOfLength(4)); if (randomBoolean()) { item.routing(randomAlphaOfLength(4)); } @@ 
-261,7 +270,7 @@ public void testIndicesExist() { public void testIndicesExistEmptyIndices() { expectThrows(IllegalArgumentException.class, () -> RequestConverters.indicesExist(new GetIndexRequest())); - expectThrows(IllegalArgumentException.class, () -> RequestConverters.indicesExist(new GetIndexRequest().indices((String[])null))); + expectThrows(IllegalArgumentException.class, () -> RequestConverters.indicesExist(new GetIndexRequest().indices((String[]) null))); } private static void getAndExistsTest(Function<GetRequest, Request> requestConverter, String method) { @@ -415,7 +424,8 @@ public void testGetSettings() throws IOException { setRandomLocal(getSettingsRequest, expectedParams); if (randomBoolean()) { - //the request object will not have include_defaults present unless it is set to true + // the request object will not have include_defaults present unless it is set to + // true getSettingsRequest.includeDefaults(randomBoolean()); if (getSettingsRequest.includeDefaults()) { expectedParams.put("include_defaults", Boolean.toString(true)); @@ -959,22 +969,21 @@ public void testBulkWithDifferentContentTypes() throws IOException { bulkRequest.add(new IndexRequest("index", "type", "0").source(singletonMap("field", "value"), XContentType.SMILE)); bulkRequest.add(new IndexRequest("index", "type", "1").source(singletonMap("field", "value"), XContentType.JSON)); IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> RequestConverters.bulk(bulkRequest)); - assertEquals("Mismatching content-type found for request with content-type [JSON], " + - "previous requests have content-type [SMILE]", exception.getMessage()); + assertEquals( + "Mismatching content-type found for request with content-type [JSON], " + "previous requests have content-type [SMILE]", + exception.getMessage()); } { BulkRequest bulkRequest = new BulkRequest(); - bulkRequest.add(new IndexRequest("index", "type", "0") - .source(singletonMap("field", "value"), XContentType.JSON)); - bulkRequest.add(new IndexRequest("index", "type", "1") - .source(singletonMap("field", "value"), XContentType.JSON)); + bulkRequest.add(new IndexRequest("index", "type", "0").source(singletonMap("field", "value"), XContentType.JSON)); + bulkRequest.add(new IndexRequest("index", "type", "1").source(singletonMap("field", "value"), XContentType.JSON)); bulkRequest.add(new UpdateRequest("index", "type", "2") .doc(new IndexRequest().source(singletonMap("field", "value"), XContentType.JSON)) - .upsert(new IndexRequest().source(singletonMap("field", "value"), XContentType.SMILE)) - ); + .upsert(new IndexRequest().source(singletonMap("field", "value"), XContentType.SMILE))); IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> RequestConverters.bulk(bulkRequest)); - assertEquals("Mismatching content-type found for request with content-type [SMILE], " + - "previous requests have content-type [JSON]", exception.getMessage()); + assertEquals( + "Mismatching content-type found for request with content-type [SMILE], " + "previous requests have content-type [JSON]", + exception.getMessage()); } { XContentType xContentType = randomFrom(XContentType.CBOR, XContentType.YAML); @@ -1011,42 +1020,14 @@ public void testSearch() throws Exception { searchRequest.types(types); Map<String, String> expectedParams = new HashMap<>(); - expectedParams.put(RestSearchAction.TYPED_KEYS_PARAM, "true"); - if (randomBoolean()) { - searchRequest.routing(randomAlphaOfLengthBetween(3, 10)); - expectedParams.put("routing", searchRequest.routing()); - } - if
(randomBoolean()) { - searchRequest.preference(randomAlphaOfLengthBetween(3, 10)); - expectedParams.put("preference", searchRequest.preference()); - } - if (randomBoolean()) { - searchRequest.searchType(randomFrom(SearchType.values())); - } - expectedParams.put("search_type", searchRequest.searchType().name().toLowerCase(Locale.ROOT)); - if (randomBoolean()) { - searchRequest.requestCache(randomBoolean()); - expectedParams.put("request_cache", Boolean.toString(searchRequest.requestCache())); - } - if (randomBoolean()) { - searchRequest.allowPartialSearchResults(randomBoolean()); - expectedParams.put("allow_partial_search_results", Boolean.toString(searchRequest.allowPartialSearchResults())); - } - if (randomBoolean()) { - searchRequest.setBatchedReduceSize(randomIntBetween(2, Integer.MAX_VALUE)); - } - expectedParams.put("batched_reduce_size", Integer.toString(searchRequest.getBatchedReduceSize())); - if (randomBoolean()) { - searchRequest.scroll(randomTimeValue()); - expectedParams.put("scroll", searchRequest.scroll().keepAlive().getStringRep()); - } - + setRandomSearchParams(searchRequest, expectedParams); setRandomIndicesOptions(searchRequest::indicesOptions, searchRequest::indicesOptions, expectedParams); SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); - //rarely skip setting the search source completely + // rarely skip setting the search source completely if (frequently()) { - //frequently set the search source to have some content, otherwise leave it empty but still set it + // frequently set the search source to have some content, otherwise leave it + // empty but still set it if (frequently()) { if (randomBoolean()) { searchSourceBuilder.size(randomIntBetween(0, Integer.MAX_VALUE)); @@ -1116,7 +1097,8 @@ public void testMultiSearch() throws IOException { MultiSearchRequest multiSearchRequest = new MultiSearchRequest(); for (int i = 0; i < numberOfSearchRequests; i++) { SearchRequest searchRequest = randomSearchRequest(() -> { - // No need to return a very complex SearchSourceBuilder here, that is tested elsewhere + // No need to return a very complex SearchSourceBuilder here, that is tested + // elsewhere SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); searchSourceBuilder.from(randomInt(10)); searchSourceBuilder.size(randomIntBetween(20, 100)); @@ -1124,14 +1106,13 @@ public void testMultiSearch() throws IOException { }); // scroll is not supported in the current msearch api, so unset it: searchRequest.scroll((Scroll) null); - // only expand_wildcards, ignore_unavailable and allow_no_indices can be specified from msearch api, so unset other options: + // only expand_wildcards, ignore_unavailable and allow_no_indices can be + // specified from msearch api, so unset other options: IndicesOptions randomlyGenerated = searchRequest.indicesOptions(); IndicesOptions msearchDefault = new MultiSearchRequest().indicesOptions(); - searchRequest.indicesOptions(IndicesOptions.fromOptions( - randomlyGenerated.ignoreUnavailable(), randomlyGenerated.allowNoIndices(), randomlyGenerated.expandWildcardsOpen(), - randomlyGenerated.expandWildcardsClosed(), msearchDefault.allowAliasesToMultipleIndices(), - msearchDefault.forbidClosedIndices(), msearchDefault.ignoreAliases() - )); + searchRequest.indicesOptions(IndicesOptions.fromOptions(randomlyGenerated.ignoreUnavailable(), + randomlyGenerated.allowNoIndices(), randomlyGenerated.expandWildcardsOpen(), randomlyGenerated.expandWildcardsClosed(), + msearchDefault.allowAliasesToMultipleIndices(), 
msearchDefault.forbidClosedIndices(), msearchDefault.ignoreAliases())); multiSearchRequest.add(searchRequest); } @@ -1156,8 +1137,8 @@ public void testMultiSearch() throws IOException { requests.add(searchRequest); }; MultiSearchRequest.readMultiLineFormat(new BytesArray(EntityUtils.toByteArray(request.getEntity())), - REQUEST_BODY_CONTENT_TYPE.xContent(), consumer, null, multiSearchRequest.indicesOptions(), null, null, - null, xContentRegistry(), true); + REQUEST_BODY_CONTENT_TYPE.xContent(), consumer, null, multiSearchRequest.indicesOptions(), null, null, null, + xContentRegistry(), true); assertEquals(requests, multiSearchRequest.requests()); } @@ -1189,11 +1170,70 @@ public void testClearScroll() throws IOException { assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); } + public void testSearchTemplate() throws Exception { + // Create a random request. + String[] indices = randomIndicesNames(0, 5); + SearchRequest searchRequest = new SearchRequest(indices); + + Map<String, String> expectedParams = new HashMap<>(); + setRandomSearchParams(searchRequest, expectedParams); + setRandomIndicesOptions(searchRequest::indicesOptions, searchRequest::indicesOptions, expectedParams); + + SearchTemplateRequest searchTemplateRequest = new SearchTemplateRequest(searchRequest); + + searchTemplateRequest.setScript("{\"query\": { \"match\" : { \"{{field}}\" : \"{{value}}\" }}}"); + searchTemplateRequest.setScriptType(ScriptType.INLINE); + searchTemplateRequest.setProfile(randomBoolean()); + + Map<String, Object> scriptParams = new HashMap<>(); + scriptParams.put("field", "name"); + scriptParams.put("value", "soren"); + searchTemplateRequest.setScriptParams(scriptParams); + + // Verify that the resulting REST request looks as expected. + Request request = RequestConverters.searchTemplate(searchTemplateRequest); + StringJoiner endpoint = new StringJoiner("/", "/", ""); + String index = String.join(",", indices); + if (Strings.hasLength(index)) { + endpoint.add(index); + } + endpoint.add("_search/template"); + + assertEquals(HttpGet.METHOD_NAME, request.getMethod()); + assertEquals(endpoint.toString(), request.getEndpoint()); + assertEquals(expectedParams, request.getParameters()); + assertToXContentBody(searchTemplateRequest, request.getEntity()); + } + + public void testRenderSearchTemplate() throws Exception { + // Create a simple request. + SearchTemplateRequest searchTemplateRequest = new SearchTemplateRequest(); + searchTemplateRequest.setSimulate(true); // Setting simulate true means the template should only be rendered. + + searchTemplateRequest.setScript("template1"); + searchTemplateRequest.setScriptType(ScriptType.STORED); + searchTemplateRequest.setProfile(randomBoolean()); + + Map<String, Object> scriptParams = new HashMap<>(); + scriptParams.put("field", "name"); + scriptParams.put("value", "soren"); + searchTemplateRequest.setScriptParams(scriptParams); + + // Verify that the resulting REST request looks as expected. + Request request = RequestConverters.searchTemplate(searchTemplateRequest); + String endpoint = "_render/template"; + + assertEquals(HttpGet.METHOD_NAME, request.getMethod()); + assertEquals(endpoint, request.getEndpoint()); + assertEquals(Collections.emptyMap(), request.getParameters()); + assertToXContentBody(searchTemplateRequest, request.getEntity()); + } + public void testExistsAlias() { GetAliasesRequest getAliasesRequest = new GetAliasesRequest(); String[] indices = randomBoolean() ?
null : randomIndicesNames(0, 5); getAliasesRequest.indices(indices); - //the HEAD endpoint requires at least an alias or an index + // the HEAD endpoint requires at least an alias or an index boolean hasIndices = indices != null && indices.length > 0; String[] aliases; if (hasIndices) { @@ -1224,15 +1264,15 @@ public void testExistsAlias() { public void testExistsAliasNoAliasNoIndex() { { GetAliasesRequest getAliasesRequest = new GetAliasesRequest(); - IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> - RequestConverters.existsAlias(getAliasesRequest)); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, + () -> RequestConverters.existsAlias(getAliasesRequest)); assertEquals("existsAlias requires at least an alias or an index", iae.getMessage()); } { - GetAliasesRequest getAliasesRequest = new GetAliasesRequest((String[])null); - getAliasesRequest.indices((String[])null); - IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> - RequestConverters.existsAlias(getAliasesRequest)); + GetAliasesRequest getAliasesRequest = new GetAliasesRequest((String[]) null); + getAliasesRequest.indices((String[]) null); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, + () -> RequestConverters.existsAlias(getAliasesRequest)); assertEquals("existsAlias requires at least an alias or an index", iae.getMessage()); } } @@ -1242,14 +1282,10 @@ public void testFieldCaps() { String[] indices = randomIndicesNames(0, 5); String[] fields = generateRandomStringArray(5, 10, false, false); - FieldCapabilitiesRequest fieldCapabilitiesRequest = new FieldCapabilitiesRequest() - .indices(indices) - .fields(fields); + FieldCapabilitiesRequest fieldCapabilitiesRequest = new FieldCapabilitiesRequest().indices(indices).fields(fields); Map<String, String> indicesOptionsParams = new HashMap<>(); - setRandomIndicesOptions(fieldCapabilitiesRequest::indicesOptions, - fieldCapabilitiesRequest::indicesOptions, - indicesOptionsParams); + setRandomIndicesOptions(fieldCapabilitiesRequest::indicesOptions, fieldCapabilitiesRequest::indicesOptions, indicesOptionsParams); Request request = RequestConverters.fieldCaps(fieldCapabilitiesRequest); @@ -1264,12 +1300,13 @@ public void testFieldCaps() { assertEquals(endpoint.toString(), request.getEndpoint()); assertEquals(4, request.getParameters().size()); - // Note that we don't check the field param value explicitly, as field names are passed through - // a hash set before being added to the request, and can appear in a non-deterministic order. + // Note that we don't check the field param value explicitly, as field names are + // passed through + // a hash set before being added to the request, and can appear in a + // non-deterministic order.
assertThat(request.getParameters(), hasKey("fields")); String[] requestFields = Strings.splitStringByCommaToArray(request.getParameters().get("fields")); - assertEquals(new HashSet<>(Arrays.asList(fields)), - new HashSet<>(Arrays.asList(requestFields))); + assertEquals(new HashSet<>(Arrays.asList(fields)), new HashSet<>(Arrays.asList(requestFields))); for (Map.Entry<String, String> param : indicesOptionsParams.entrySet()) { assertThat(request.getParameters(), hasEntry(param.getKey(), param.getValue())); @@ -1428,6 +1465,66 @@ public void testIndexPutSettings() throws IOException { assertEquals(expectedParams, request.getParameters()); } + public void testListTasks() { + { + ListTasksRequest request = new ListTasksRequest(); + Map<String, String> expectedParams = new HashMap<>(); + if (randomBoolean()) { + request.setDetailed(randomBoolean()); + if (request.getDetailed()) { + expectedParams.put("detailed", "true"); + } + } + if (randomBoolean()) { + request.setWaitForCompletion(randomBoolean()); + if (request.getWaitForCompletion()) { + expectedParams.put("wait_for_completion", "true"); + } + } + if (randomBoolean()) { + String timeout = randomTimeValue(); + request.setTimeout(timeout); + expectedParams.put("timeout", timeout); + } + if (randomBoolean()) { + if (randomBoolean()) { + TaskId taskId = new TaskId(randomAlphaOfLength(5), randomNonNegativeLong()); + request.setParentTaskId(taskId); + expectedParams.put("parent_task_id", taskId.toString()); + } else { + request.setParentTask(TaskId.EMPTY_TASK_ID); + } + } + if (randomBoolean()) { + String[] nodes = generateRandomStringArray(10, 8, false); + request.setNodes(nodes); + if (nodes.length > 0) { + expectedParams.put("nodes", String.join(",", nodes)); + } + } + if (randomBoolean()) { + String[] actions = generateRandomStringArray(10, 8, false); + request.setActions(actions); + if (actions.length > 0) { + expectedParams.put("actions", String.join(",", actions)); + } + } + expectedParams.put("group_by", "none"); + Request httpRequest = RequestConverters.listTasks(request); + assertThat(httpRequest, notNullValue()); + assertThat(httpRequest.getMethod(), equalTo(HttpGet.METHOD_NAME)); + assertThat(httpRequest.getEntity(), nullValue()); + assertThat(httpRequest.getEndpoint(), equalTo("/_tasks")); + assertThat(httpRequest.getParameters(), equalTo(expectedParams)); + } + { + ListTasksRequest request = new ListTasksRequest(); + request.setTaskId(new TaskId(randomAlphaOfLength(5), randomNonNegativeLong())); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> RequestConverters.listTasks(request)); + assertEquals("TaskId cannot be used for list tasks request", exception.getMessage()); + } + } + public void testGetRepositories() { Map<String, String> expectedParams = new HashMap<>(); StringBuilder endpoint = new StringBuilder("/_snapshot"); @@ -1437,7 +1534,7 @@ public void testGetRepositories() { setRandomLocal(getRepositoriesRequest, expectedParams); if (randomBoolean()) { - String[] entries = new String[] {"a", "b", "c"}; + String[] entries = new String[] { "a", "b", "c" }; getRepositoriesRequest.repositories(entries); endpoint.append("/" + String.join(",", entries)); } @@ -1448,6 +1545,27 @@ assertThat(expectedParams, equalTo(request.getParameters())); } + public void testCreateRepository() throws IOException { + String repository = "repo"; + String endpoint = "/_snapshot/" + repository; + Path repositoryLocation = PathUtils.get("."); + PutRepositoryRequest putRepositoryRequest = new PutRepositoryRequest(repository); +
putRepositoryRequest.type(FsRepository.TYPE); + putRepositoryRequest.verify(randomBoolean()); + + putRepositoryRequest.settings( + Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), repositoryLocation) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .build()); + + Request request = RequestConverters.createRepository(putRepositoryRequest); + assertThat(endpoint, equalTo(request.getEndpoint())); + assertThat(HttpPut.METHOD_NAME, equalTo(request.getMethod())); + assertToXContentBody(putRepositoryRequest, request.getEntity()); + } + public void testPutTemplateRequest() throws Exception { Map<String, String> names = new HashMap<>(); names.put("log", "log"); @@ -1455,9 +1573,8 @@ public void testPutTemplateRequest() throws Exception { names.put("-#template", "-%23template"); names.put("foo^bar", "foo%5Ebar"); - PutIndexTemplateRequest putTemplateRequest = new PutIndexTemplateRequest() - .name(randomFrom(names.keySet())) - .patterns(Arrays.asList(generateRandomStringArray(20, 100, false, false))); + PutIndexTemplateRequest putTemplateRequest = new PutIndexTemplateRequest().name(randomFrom(names.keySet())) + .patterns(Arrays.asList(generateRandomStringArray(20, 100, false, false))); if (randomBoolean()) { putTemplateRequest.order(randomInt()); } @@ -1514,14 +1631,12 @@ public void testEndpointBuilder() { assertEquals("/a/b", endpointBuilder.build()); } { - EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a").addPathPart("b") - .addPathPartAsIs("_create"); + EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a").addPathPart("b").addPathPartAsIs("_create"); assertEquals("/a/b/_create", endpointBuilder.build()); } { - EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a", "b", "c") - .addPathPartAsIs("_create"); + EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a", "b", "c").addPathPartAsIs("_create"); assertEquals("/a/b/c/_create", endpointBuilder.build()); } { @@ -1580,13 +1695,12 @@ public void testEndpointBuilderEncodeParts() { assertEquals("/foo%5Ebar", endpointBuilder.build()); } { - EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("cluster1:index1,index2") - .addPathPartAsIs("_search"); + EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("cluster1:index1,index2").addPathPartAsIs("_search"); assertEquals("/cluster1:index1,index2/_search", endpointBuilder.build()); } { - EndpointBuilder endpointBuilder = new EndpointBuilder() - .addCommaSeparatedPathParts(new String[]{"index1", "index2"}).addPathPartAsIs("cache/clear"); + EndpointBuilder endpointBuilder = new EndpointBuilder().addCommaSeparatedPathParts(new String[] { "index1", "index2" }) + .addPathPartAsIs("cache/clear"); assertEquals("/index1,index2/cache/clear", endpointBuilder.build()); } } @@ -1594,12 +1708,12 @@ public void testEndpointBuilderEncodeParts() { public void testEndpoint() { assertEquals("/index/type/id", RequestConverters.endpoint("index", "type", "id")); assertEquals("/index/type/id/_endpoint", RequestConverters.endpoint("index", "type", "id", "_endpoint")); - assertEquals("/index1,index2", RequestConverters.endpoint(new String[]{"index1", "index2"})); - assertEquals("/index1,index2/_endpoint", RequestConverters.endpoint(new String[]{"index1", "index2"}, "_endpoint")); - assertEquals("/index1,index2/type1,type2/_endpoint", RequestConverters.endpoint(new String[]{"index1", "index2"}, - new
String[]{"type1", "type2"}, "_endpoint")); - assertEquals("/index1,index2/_endpoint/suffix1,suffix2", RequestConverters.endpoint(new String[]{"index1", "index2"}, - "_endpoint", new String[]{"suffix1", "suffix2"})); + assertEquals("/index1,index2", RequestConverters.endpoint(new String[] { "index1", "index2" })); + assertEquals("/index1,index2/_endpoint", RequestConverters.endpoint(new String[] { "index1", "index2" }, "_endpoint")); + assertEquals("/index1,index2/type1,type2/_endpoint", + RequestConverters.endpoint(new String[] { "index1", "index2" }, new String[] { "type1", "type2" }, "_endpoint")); + assertEquals("/index1,index2/_endpoint/suffix1,suffix2", + RequestConverters.endpoint(new String[] { "index1", "index2" }, "_endpoint", new String[] { "suffix1", "suffix2" })); } public void testCreateContentType() { @@ -1615,20 +1729,22 @@ public void testEnforceSameContentType() { XContentType bulkContentType = randomBoolean() ? xContentType : null; - IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> - enforceSameContentType(new IndexRequest().source(singletonMap("field", "value"), XContentType.CBOR), bulkContentType)); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> enforceSameContentType(new IndexRequest().source(singletonMap("field", "value"), XContentType.CBOR), + bulkContentType)); assertEquals("Unsupported content-type found for request with content-type [CBOR], only JSON and SMILE are supported", exception.getMessage()); - exception = expectThrows(IllegalArgumentException.class, () -> - enforceSameContentType(new IndexRequest().source(singletonMap("field", "value"), XContentType.YAML), bulkContentType)); + exception = expectThrows(IllegalArgumentException.class, + () -> enforceSameContentType(new IndexRequest().source(singletonMap("field", "value"), XContentType.YAML), + bulkContentType)); assertEquals("Unsupported content-type found for request with content-type [YAML], only JSON and SMILE are supported", exception.getMessage()); XContentType requestContentType = xContentType == XContentType.JSON ? 
XContentType.SMILE : XContentType.JSON; - exception = expectThrows(IllegalArgumentException.class, () -> - enforceSameContentType(new IndexRequest().source(singletonMap("field", "value"), requestContentType), xContentType)); + exception = expectThrows(IllegalArgumentException.class, + () -> enforceSameContentType(new IndexRequest().source(singletonMap("field", "value"), requestContentType), xContentType)); assertEquals("Mismatching content-type found for request with content-type [" + requestContentType + "], " + "previous requests have content-type [" + xContentType + "]", exception.getMessage()); } @@ -1662,12 +1778,44 @@ private static void randomizeFetchSourceContextParams(Consumer<FetchSourceContex + + private static void setRandomSearchParams(SearchRequest searchRequest, Map<String, String> expectedParams) { + expectedParams.put(RestSearchAction.TYPED_KEYS_PARAM, "true"); + if (randomBoolean()) { + searchRequest.routing(randomAlphaOfLengthBetween(3, 10)); + expectedParams.put("routing", searchRequest.routing()); + } + if (randomBoolean()) { + searchRequest.preference(randomAlphaOfLengthBetween(3, 10)); + expectedParams.put("preference", searchRequest.preference()); + } + if (randomBoolean()) { + searchRequest.searchType(randomFrom(SearchType.values())); + } + expectedParams.put("search_type", searchRequest.searchType().name().toLowerCase(Locale.ROOT)); + if (randomBoolean()) { + searchRequest.requestCache(randomBoolean()); + expectedParams.put("request_cache", Boolean.toString(searchRequest.requestCache())); + } + if (randomBoolean()) { + searchRequest.allowPartialSearchResults(randomBoolean()); + expectedParams.put("allow_partial_search_results", Boolean.toString(searchRequest.allowPartialSearchResults())); + } + if (randomBoolean()) { + searchRequest.setBatchedReduceSize(randomIntBetween(2, Integer.MAX_VALUE)); + } + expectedParams.put("batched_reduce_size", Integer.toString(searchRequest.getBatchedReduceSize())); + if (randomBoolean()) { + searchRequest.scroll(randomTimeValue()); + expectedParams.put("scroll", searchRequest.scroll().keepAlive().getStringRep()); + } + } + private static void setRandomIndicesOptions(Consumer<IndicesOptions> setter, Supplier<IndicesOptions> getter, - Map<String, String> expectedParams) { + Map<String, String> expectedParams) { if (randomBoolean()) { - setter.accept(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), - randomBoolean())); + setter.accept(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); } expectedParams.put("ignore_unavailable", Boolean.toString(getter.get().ignoreUnavailable())); expectedParams.put("allow_no_indices", Boolean.toString(getter.get().allowNoIndices())); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java index 549b4ce0a85c5..e147642fc73bd 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java @@ -38,8 +38,11 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.MatchQueryBuilder; import org.elasticsearch.index.query.ScriptQueryBuilder; import
org.elasticsearch.index.query.TermsQueryBuilder; @@ -48,6 +51,8 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; +import org.elasticsearch.script.mustache.SearchTemplateRequest; +import org.elasticsearch.script.mustache.SearchTemplateResponse; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.bucket.range.Range; @@ -69,10 +74,12 @@ import java.io.IOException; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; import static org.hamcrest.Matchers.both; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.either; @@ -733,6 +740,103 @@ public void testMultiSearch_failure() throws Exception { assertThat(multiSearchResponse.getResponses()[1].getResponse(), nullValue()); } + public void testSearchTemplate() throws IOException { + SearchTemplateRequest searchTemplateRequest = new SearchTemplateRequest(); + searchTemplateRequest.setRequest(new SearchRequest("index")); + + searchTemplateRequest.setScriptType(ScriptType.INLINE); + searchTemplateRequest.setScript( + "{" + + " \"query\": {" + + " \"match\": {" + + " \"num\": {{number}}" + + " }" + + " }" + + "}"); + + Map<String, Object> scriptParams = new HashMap<>(); + scriptParams.put("number", 10); + searchTemplateRequest.setScriptParams(scriptParams); + + searchTemplateRequest.setExplain(true); + searchTemplateRequest.setProfile(true); + + SearchTemplateResponse searchTemplateResponse = execute(searchTemplateRequest, + highLevelClient()::searchTemplate, + highLevelClient()::searchTemplateAsync); + + assertNull(searchTemplateResponse.getSource()); + + SearchResponse searchResponse = searchTemplateResponse.getResponse(); + assertNotNull(searchResponse); + + assertEquals(1, searchResponse.getHits().totalHits); + assertEquals(1, searchResponse.getHits().getHits().length); + assertThat(searchResponse.getHits().getMaxScore(), greaterThan(0f)); + + SearchHit hit = searchResponse.getHits().getHits()[0]; + assertNotNull(hit.getExplanation()); + + assertFalse(searchResponse.getProfileResults().isEmpty()); + } + + public void testNonExistentSearchTemplate() { + SearchTemplateRequest searchTemplateRequest = new SearchTemplateRequest(); + searchTemplateRequest.setRequest(new SearchRequest("index")); + + searchTemplateRequest.setScriptType(ScriptType.STORED); + searchTemplateRequest.setScript("non-existent"); + searchTemplateRequest.setScriptParams(Collections.emptyMap()); + + ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, + () -> execute(searchTemplateRequest, + highLevelClient()::searchTemplate, + highLevelClient()::searchTemplateAsync)); + + assertEquals(RestStatus.NOT_FOUND, exception.status()); + } + + public void testRenderSearchTemplate() throws IOException { + SearchTemplateRequest searchTemplateRequest = new SearchTemplateRequest(); + + searchTemplateRequest.setScriptType(ScriptType.INLINE); + searchTemplateRequest.setScript( + "{" + + " \"query\": {" + + " \"match\": {" + + " \"num\": {{number}}" + + " }" + + " }" + + "}"); + + Map<String, Object> scriptParams = new HashMap<>(); + scriptParams.put("number", 10); + searchTemplateRequest.setScriptParams(scriptParams); + + // Setting
simulate true causes the template to only be rendered. + searchTemplateRequest.setSimulate(true); + + SearchTemplateResponse searchTemplateResponse = execute(searchTemplateRequest, + highLevelClient()::searchTemplate, + highLevelClient()::searchTemplateAsync); + assertNull(searchTemplateResponse.getResponse()); + + BytesReference expectedSource = BytesReference.bytes( + XContentFactory.jsonBuilder() + .startObject() + .startObject("query") + .startObject("match") + .field("num", 10) + .endObject() + .endObject() + .endObject()); + + BytesReference actualSource = searchTemplateResponse.getSource(); + assertNotNull(actualSource); + + assertToXContentEquivalent(expectedSource, actualSource, XContentType.JSON); + } + public void testFieldCaps() throws IOException { FieldCapabilitiesRequest request = new FieldCapabilitiesRequest() .indices("index1", "index2") diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java index ab2c632bfeb58..1d0ea953cd5c1 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java @@ -19,56 +19,56 @@ package org.elasticsearch.client; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.rest.RestStatus; import java.io.IOException; -import java.util.Collections; import static org.hamcrest.Matchers.equalTo; public class SnapshotIT extends ESRestHighLevelClientTestCase { - public void testModulesGetRepositoriesUsingParams() throws IOException { - String repository = "test"; - String repositorySettings = "{\"type\":\"fs\", \"settings\":{\"location\": \".\"}}"; - highLevelClient().getLowLevelClient().performRequest("put", "_snapshot/" + repository, Collections.emptyMap(), - new StringEntity(repositorySettings, ContentType.APPLICATION_JSON)); - - highLevelClient().getLowLevelClient().performRequest("put", "_snapshot/" + repository + "_other", Collections.emptyMap(), - new StringEntity(repositorySettings, ContentType.APPLICATION_JSON)); + private PutRepositoryResponse createTestRepository(String repository, String type, String settings) throws IOException { + PutRepositoryRequest request = new PutRepositoryRequest(repository); + request.settings(settings, XContentType.JSON); + request.type(type); + return execute(request, highLevelClient().snapshot()::createRepository, + highLevelClient().snapshot()::createRepositoryAsync); - { - GetRepositoriesRequest request = new GetRepositoriesRequest(); - request.repositories(new String[]{repository}); - GetRepositoriesResponse response = execute(request, highLevelClient().snapshot()::getRepositories, - highLevelClient().snapshot()::getRepositoriesAsync); - assertThat(1, equalTo(response.repositories().size())); - } - { - GetRepositoriesRequest request = new GetRepositoriesRequest(); - GetRepositoriesResponse response = execute(request, 
highLevelClient().snapshot()::getRepositories, - highLevelClient().snapshot()::getRepositoriesAsync); - assertThat(2, equalTo(response.repositories().size())); - } } - public void testModulesGetDefaultRepositories() throws IOException { - String repositorySettings = "{\"type\":\"fs\", \"settings\":{\"location\": \".\"}}"; - GetRepositoriesRequest request = new GetRepositoriesRequest(); + public void testCreateRepository() throws IOException { + PutRepositoryResponse response = createTestRepository("test", FsRepository.TYPE, "{\"location\": \".\"}"); + assertTrue(response.isAcknowledged()); + } - highLevelClient().getLowLevelClient().performRequest("put", "_snapshot/test", Collections.emptyMap(), - new StringEntity(repositorySettings, ContentType.APPLICATION_JSON)); + public void testModulesGetRepositoriesUsingParams() throws IOException { + String testRepository = "test"; + assertTrue(createTestRepository(testRepository, FsRepository.TYPE, "{\"location\": \".\"}").isAcknowledged()); + assertTrue(createTestRepository("other", FsRepository.TYPE, "{\"location\": \".\"}").isAcknowledged()); + GetRepositoriesRequest request = new GetRepositoriesRequest(); + request.repositories(new String[]{testRepository}); GetRepositoriesResponse response = execute(request, highLevelClient().snapshot()::getRepositories, highLevelClient().snapshot()::getRepositoriesAsync); assertThat(1, equalTo(response.repositories().size())); } + public void testModulesGetDefaultRepositories() throws IOException { + assertTrue(createTestRepository("other", FsRepository.TYPE, "{\"location\": \".\"}").isAcknowledged()); + assertTrue(createTestRepository("test", FsRepository.TYPE, "{\"location\": \".\"}").isAcknowledged()); + + GetRepositoriesResponse response = execute(new GetRepositoriesRequest(), highLevelClient().snapshot()::getRepositories, + highLevelClient().snapshot()::getRepositoriesAsync); + assertThat(2, equalTo(response.repositories().size())); + } + public void testModulesGetRepositoriesNonExistent() throws IOException { String repository = "doesnotexist"; GetRepositoriesRequest request = new GetRepositoriesRequest(new String[]{repository}); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java index 2e7ea1650f424..d41b11c68fe44 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java @@ -19,8 +19,14 @@ package org.elasticsearch.client.documentation; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.LatchedActionListener; +import org.elasticsearch.action.TaskOperationFailure; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; +import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.elasticsearch.client.ESRestHighLevelClientTestCase; @@ -31,14 +37,20 @@ import org.elasticsearch.common.unit.TimeValue; import 
org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.indices.recovery.RecoverySettings; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.tasks.TaskInfo; import java.io.IOException; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import static java.util.Collections.emptyList; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.notNullValue; /** * This class is used to generate the Java Cluster API documentation. @@ -177,4 +189,87 @@ public void onFailure(Exception e) { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } } + + public void testListTasks() throws IOException { + RestHighLevelClient client = highLevelClient(); + { + // tag::list-tasks-request + ListTasksRequest request = new ListTasksRequest(); + // end::list-tasks-request + + // tag::list-tasks-request-filter + request.setActions("cluster:*"); // <1> + request.setNodes("nodeId1", "nodeId2"); // <2> + request.setParentTaskId(new TaskId("parentTaskId", 42)); // <3> + // end::list-tasks-request-filter + + // tag::list-tasks-request-detailed + request.setDetailed(true); // <1> + // end::list-tasks-request-detailed + + // tag::list-tasks-request-wait-completion + request.setWaitForCompletion(true); // <1> + request.setTimeout(TimeValue.timeValueSeconds(50)); // <2> + request.setTimeout("50s"); // <3> + // end::list-tasks-request-wait-completion + } + + ListTasksRequest request = new ListTasksRequest(); + + // tag::list-tasks-execute + ListTasksResponse response = client.cluster().listTasks(request); + // end::list-tasks-execute + + assertThat(response, notNullValue()); + + // tag::list-tasks-response-tasks + List tasks = response.getTasks(); // <1> + // end::list-tasks-response-tasks + + // tag::list-tasks-response-calc + Map> perNodeTasks = response.getPerNodeTasks(); // <1> + List groups = response.getTaskGroups(); // <2> + // end::list-tasks-response-calc + + // tag::list-tasks-response-failures + List nodeFailures = response.getNodeFailures(); // <1> + List taskFailures = response.getTaskFailures(); // <2> + // end::list-tasks-response-failures + + assertThat(response.getNodeFailures(), equalTo(emptyList())); + assertThat(response.getTaskFailures(), equalTo(emptyList())); + assertThat(response.getTasks().size(), greaterThanOrEqualTo(2)); + } + + public void testListTasksAsync() throws Exception { + RestHighLevelClient client = highLevelClient(); + { + ListTasksRequest request = new ListTasksRequest(); + + // tag::list-tasks-execute-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(ListTasksResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::list-tasks-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::list-tasks-execute-async + client.cluster().listTasksAsync(request, listener); // <1> + // end::list-tasks-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java index 8a12016025c3e..463c5f7d12f5e 100644 
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java @@ -41,7 +41,11 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.ESRestHighLevelClientTestCase; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.unit.TimeValue; @@ -60,6 +64,9 @@ import org.elasticsearch.index.rankeval.RatedRequest; import org.elasticsearch.index.rankeval.RatedSearchHit; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.script.mustache.SearchTemplateRequest; +import org.elasticsearch.script.mustache.SearchTemplateResponse; import org.elasticsearch.search.Scroll; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; @@ -92,6 +99,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; @@ -706,9 +714,130 @@ public void onFailure(Exception e) { } } + public void testSearchTemplateWithInlineScript() throws Exception { + indexSearchTestData(); + RestHighLevelClient client = highLevelClient(); + + // tag::search-template-request-inline + SearchTemplateRequest request = new SearchTemplateRequest(); + request.setRequest(new SearchRequest("posts")); // <1> + + request.setScriptType(ScriptType.INLINE); + request.setScript( // <2> + "{" + + " \"query\": { \"match\" : { \"{{field}}\" : \"{{value}}\" } }," + + " \"size\" : \"{{size}}\"" + + "}"); + + Map scriptParams = new HashMap<>(); + scriptParams.put("field", "title"); + scriptParams.put("value", "elasticsearch"); + scriptParams.put("size", 5); + request.setScriptParams(scriptParams); // <3> + // end::search-template-request-inline + + // tag::search-template-response + SearchTemplateResponse response = client.searchTemplate(request); + SearchResponse searchResponse = response.getResponse(); + // end::search-template-response + + assertNotNull(searchResponse); + assertTrue(searchResponse.getHits().totalHits > 0); + + // tag::render-search-template-request + request.setSimulate(true); // <1> + // end::render-search-template-request + + // tag::render-search-template-response + SearchTemplateResponse renderResponse = client.searchTemplate(request); + BytesReference source = renderResponse.getSource(); // <1> + // end::render-search-template-response + + assertNotNull(source); + assertEquals(( + "{" + + " \"size\" : \"5\"," + + " \"query\": { \"match\" : { \"title\" : \"elasticsearch\" } }" + + "}").replaceAll("\\s+", ""), source.utf8ToString()); + } + + public void testSearchTemplateWithStoredScript() throws Exception { + indexSearchTestData(); + RestHighLevelClient client = highLevelClient(); + RestClient restClient = client(); + + // tag::register-script + Request scriptRequest = new Request("POST", "_scripts/title_search"); + scriptRequest.setJsonEntity( + "{" + + " \"script\": {" + + " \"lang\": \"mustache\"," + + " \"source\": {" + + " \"query\": { \"match\" : { \"{{field}}\" 
: \"{{value}}\" } }," + + " \"size\" : \"{{size}}\"" + + " }" + + " }" + + "}"); + Response scriptResponse = restClient.performRequest(scriptRequest); + // end::register-script + assertEquals(RestStatus.OK.getStatus(), scriptResponse.getStatusLine().getStatusCode()); + + // tag::search-template-request-stored + SearchTemplateRequest request = new SearchTemplateRequest(); + request.setRequest(new SearchRequest("posts")); + + request.setScriptType(ScriptType.STORED); + request.setScript("title_search"); + + Map params = new HashMap<>(); + params.put("field", "title"); + params.put("value", "elasticsearch"); + params.put("size", 5); + request.setScriptParams(params); + // end::search-template-request-stored + + // tag::search-template-request-options + request.setExplain(true); + request.setProfile(true); + // end::search-template-request-options + + // tag::search-template-execute + SearchTemplateResponse response = client.searchTemplate(request); + // end::search-template-execute + + SearchResponse searchResponse = response.getResponse(); + assertNotNull(searchResponse); + assertTrue(searchResponse.getHits().totalHits > 0); + + // tag::search-template-execute-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(SearchTemplateResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::search-template-execute-listener + + // Replace the empty listener by a blocking listener for tests. + CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::search-template-execute-async + client.searchTemplateAsync(request, listener); // <1> + // end::search-template-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + public void testFieldCaps() throws Exception { indexSearchTestData(); RestHighLevelClient client = highLevelClient(); + // tag::field-caps-request FieldCapabilitiesRequest request = new FieldCapabilitiesRequest() .fields("user") diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java index 1044cc9da3332..c57f8e2a2fbd5 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java @@ -19,20 +19,24 @@ package org.elasticsearch.client.documentation; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; import org.elasticsearch.client.ESRestHighLevelClientTestCase; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.cluster.metadata.RepositoryMetaData; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.repositories.fs.FsRepository; import 
java.io.IOException; -import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -58,7 +62,114 @@ */ public class SnapshotClientDocumentationIT extends ESRestHighLevelClientTestCase { - private static final String testRepository = "test_repository"; + private static final String repositoryName = "test_repository"; + + public void testSnapshotCreateRepository() throws IOException { + RestHighLevelClient client = highLevelClient(); + + // tag::create-repository-request + PutRepositoryRequest request = new PutRepositoryRequest(); + // end::create-repository-request + + // tag::create-repository-create-settings + String locationKey = FsRepository.LOCATION_SETTING.getKey(); + String locationValue = "."; + String compressKey = FsRepository.COMPRESS_SETTING.getKey(); + boolean compressValue = true; + + Settings settings = Settings.builder() + .put(locationKey, locationValue) + .put(compressKey, compressValue) + .build(); // <1> + // end::create-repository-create-settings + + // tag::create-repository-request-repository-settings + request.settings(settings); // <1> + // end::create-repository-request-repository-settings + + { + // tag::create-repository-settings-builder + Settings.Builder settingsBuilder = Settings.builder() + .put(locationKey, locationValue) + .put(compressKey, compressValue); + request.settings(settingsBuilder); // <1> + // end::create-repository-settings-builder + } + { + // tag::create-repository-settings-map + Map map = new HashMap<>(); + map.put(locationKey, locationValue); + map.put(compressKey, compressValue); + request.settings(map); // <1> + // end::create-repository-settings-map + } + { + // tag::create-repository-settings-source + request.settings("{\"location\": \".\", \"compress\": \"true\"}", + XContentType.JSON); // <1> + // end::create-repository-settings-source + } + + // tag::create-repository-request-name + request.name(repositoryName); // <1> + // end::create-repository-request-name + // tag::create-repository-request-type + request.type(FsRepository.TYPE); // <1> + // end::create-repository-request-type + + // tag::create-repository-request-masterTimeout + request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1> + request.masterNodeTimeout("1m"); // <2> + // end::create-repository-request-masterTimeout + // tag::create-repository-request-timeout + request.timeout(TimeValue.timeValueMinutes(1)); // <1> + request.timeout("1m"); // <2> + // end::create-repository-request-timeout + // tag::create-repository-request-verify + request.verify(true); // <1> + // end::create-repository-request-verify + + // tag::create-repository-execute + PutRepositoryResponse response = client.snapshot().createRepository(request); + // end::create-repository-execute + + // tag::create-repository-response + boolean acknowledged = response.isAcknowledged(); // <1> + // end::create-repository-response + assertTrue(acknowledged); + } + + public void testSnapshotCreateRepositoryAsync() throws InterruptedException { + RestHighLevelClient client = highLevelClient(); + { + PutRepositoryRequest request = new PutRepositoryRequest(repositoryName); + + // tag::create-repository-execute-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(PutRepositoryResponse putRepositoryResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::create-repository-execute-listener + + 
// Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::create-repository-execute-async + client.snapshot().createRepositoryAsync(request, listener); // <1> + // end::create-repository-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } public void testSnapshotGetRepository() throws IOException { RestHighLevelClient client = highLevelClient(); @@ -70,7 +181,7 @@ public void testSnapshotGetRepository() throws IOException { // end::get-repository-request // tag::get-repository-request-repositories - String [] repositories = new String[] { testRepository }; + String [] repositories = new String[] {repositoryName}; request.repositories(repositories); // <1> // end::get-repository-request-repositories // tag::get-repository-request-local @@ -89,7 +200,7 @@ public void testSnapshotGetRepository() throws IOException { List repositoryMetaDataResponse = response.repositories(); // end::get-repository-response assertThat(1, equalTo(repositoryMetaDataResponse.size())); - assertThat(testRepository, equalTo(repositoryMetaDataResponse.get(0).name())); + assertThat(repositoryName, equalTo(repositoryMetaDataResponse.get(0).name())); } public void testSnapshotGetRepositoryAsync() throws InterruptedException { @@ -122,14 +233,12 @@ public void onFailure(Exception e) { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } - } private void createTestRepositories() throws IOException { - RestHighLevelClient client = highLevelClient(); - String repositorySettings = "{\"type\":\"fs\", \"settings\":{\"location\": \".\"}}"; - highLevelClient().getLowLevelClient().performRequest("put", "_snapshot/" + testRepository, Collections.emptyMap(), - new StringEntity(repositorySettings, ContentType.APPLICATION_JSON)); - + PutRepositoryRequest request = new PutRepositoryRequest(repositoryName); + request.type(FsRepository.TYPE); + request.settings("{\"location\": \".\"}", XContentType.JSON); + assertTrue(highLevelClient().snapshot().createRepository(request).isAcknowledged()); } } diff --git a/docs/java-api/query-dsl/has-child-query.asciidoc b/docs/java-api/query-dsl/has-child-query.asciidoc index 300b32e1922b0..f47f3af487dfe 100644 --- a/docs/java-api/query-dsl/has-child-query.asciidoc +++ b/docs/java-api/query-dsl/has-child-query.asciidoc @@ -9,7 +9,7 @@ When using the `has_child` query it is important to use the `PreBuiltTransportCl -------------------------------------------------- Settings settings = Settings.builder().put("cluster.name", "elasticsearch").build(); TransportClient client = new PreBuiltTransportClient(settings); -client.addTransportAddress(new InetSocketTransportAddress(new InetSocketAddress(InetAddresses.forString("127.0.0.1"), 9300))); +client.addTransportAddress(new TransportAddress(new InetSocketAddress(InetAddresses.forString("127.0.0.1"), 9300))); -------------------------------------------------- Otherwise the parent-join module doesn't get loaded and the `has_child` query can't be used from the transport client. diff --git a/docs/java-rest/high-level/cluster/list_tasks.asciidoc b/docs/java-rest/high-level/cluster/list_tasks.asciidoc new file mode 100644 index 0000000000000..1a2117b2e66e6 --- /dev/null +++ b/docs/java-rest/high-level/cluster/list_tasks.asciidoc @@ -0,0 +1,101 @@ +[[java-rest-high-cluster-list-tasks]] +=== List Tasks API + +The List Tasks API allows you to get information about the tasks currently executing in the cluster.
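Before walking through each option, here is a minimal end-to-end sketch of the API (an illustration only, assuming a `RestHighLevelClient` named `client` has already been constructed):

["source","java"]
--------------------------------------------------
ListTasksRequest request = new ListTasksRequest();
request.setDetailed(true); // include detailed task information

ListTasksResponse response = client.cluster().listTasks(request);
for (TaskInfo task : response.getTasks()) {
    // inspect each running task, e.g. its description
}
--------------------------------------------------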
+ +[[java-rest-high-cluster-list-tasks-request]] +==== List Tasks Request + +A `ListTasksRequest`: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-request] +-------------------------------------------------- +There are no required parameters. By default, the client will list all tasks and will not wait +for task completion. + +==== Parameters + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-request-filter] +-------------------------------------------------- +<1> Request only cluster-related tasks +<2> Request all tasks running on nodes `nodeId1` and `nodeId2` +<3> Request only children of a particular task + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-request-detailed] +-------------------------------------------------- +<1> Should the information include detailed, potentially slow-to-generate data. Defaults to `false` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-request-wait-completion] +-------------------------------------------------- +<1> Should this request wait for all found tasks to complete. Defaults to `false` +<2> Timeout for the request as a `TimeValue`. Applicable only if `setWaitForCompletion` is `true`. +Defaults to 30 seconds +<3> Timeout as a `String` + +[[java-rest-high-cluster-list-tasks-sync]] +==== Synchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-execute] +-------------------------------------------------- + +[[java-rest-high-cluster-list-tasks-async]] +==== Asynchronous Execution + +The asynchronous execution of a list tasks request requires both the +`ListTasksRequest` instance and an `ActionListener` instance to be +passed to the asynchronous method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-execute-async] +-------------------------------------------------- +<1> The `ListTasksRequest` to execute and the `ActionListener` to use +when the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed, the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. + +A typical listener for `ListTasksResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of a failure. 
The raised exception is provided as an argument + +[[java-rest-high-cluster-list-tasks-response]] +==== List Tasks Response + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-response-tasks] +-------------------------------------------------- +<1> List of currently running tasks + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-response-calc] +-------------------------------------------------- +<1> List of tasks grouped by node +<2> List of tasks grouped by parent task + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-response-failures] +-------------------------------------------------- +<1> List of node failures +<2> List of task failures diff --git a/docs/java-rest/high-level/search/search-template.asciidoc b/docs/java-rest/high-level/search/search-template.asciidoc new file mode 100644 index 0000000000000..3f0dfb8ab28e0 --- /dev/null +++ b/docs/java-rest/high-level/search/search-template.asciidoc @@ -0,0 +1,117 @@ +[[java-rest-high-search-template]] +=== Search Template API + +The search template API allows searches to be executed from a template based +on the mustache language, and also allows rendered templates to be previewed. + +[[java-rest-high-search-template-request]] +==== Search Template Request + +===== Inline Templates + +In the most basic form of request, the search template is specified inline: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[search-template-request-inline] +-------------------------------------------------- +<1> The search is executed against the `posts` index. +<2> The template defines the structure of the search source. It is passed +as a string because mustache templates are not always valid JSON. +<3> Before running the search, the template is rendered with the provided parameters. + +===== Registered Templates + +Search templates can be registered in advance through the stored scripts API. Note that +the stored scripts API is not yet available in the high-level REST client, so in this +example we use the low-level REST client. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[register-script] +-------------------------------------------------- + +Instead of providing an inline script, we can refer to this registered template in the request: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[search-template-request-stored] +-------------------------------------------------- + +===== Rendering Templates + +Given parameter values, a template can be rendered without executing a search: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[render-search-template-request] +-------------------------------------------------- +<1> Setting `simulate` to `true` causes the search template to only be rendered.
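For example, a rendered-only execution might look like the following sketch (illustrative only, reusing the `client` and the inline `request` built above):

["source","java"]
--------------------------------------------------
request.setSimulate(true); // render only, do not execute the search
SearchTemplateResponse response = client.searchTemplate(request);
BytesReference renderedSource = response.getSource(); // the rendered JSON source
--------------------------------------------------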
+ +Both inline and pre-registered templates can be rendered. + +===== Optional Arguments + +As in standard search requests, the `explain` and `profile` options are supported: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[search-template-request-options] +-------------------------------------------------- + +===== Additional References + +The {ref}/search-template.html[Search Template documentation] contains further examples of how search requests can be templated. + +[[java-rest-high-search-template-sync]] +==== Synchronous Execution + +The `searchTemplate` method executes the request synchronously: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[search-template-execute] +-------------------------------------------------- + +==== Asynchronous Execution + +A search template request can be executed asynchronously through the `searchTemplateAsync` +method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[search-template-execute-async] +-------------------------------------------------- +<1> The `SearchTemplateRequest` to execute and the `ActionListener` to call when the execution completes. + +The asynchronous method does not block and returns immediately. Once the request completes, the +`ActionListener` is called back using the `onResponse` method if the execution completed successfully, +or using the `onFailure` method if it failed. + +A typical listener for `SearchTemplateResponse` is constructed as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[search-template-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. +<2> Called when the whole `SearchTemplateRequest` fails. + +==== Search Template Response + +For a standard search template request, the response contains a `SearchResponse` object +with the result of executing the search: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[search-template-response] +-------------------------------------------------- + +If `simulate` was set to `true` in the request, then the response +will contain the rendered search source instead of a `SearchResponse`: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[render-search-template-response] +-------------------------------------------------- +<1> The rendered source in bytes, in our example `{"query": { "match" : { "title" : "elasticsearch" }}, "size" : 5}`. diff --git a/docs/java-rest/high-level/snapshot/create_repository.asciidoc b/docs/java-rest/high-level/snapshot/create_repository.asciidoc new file mode 100644 index 0000000000000..5c54529209720 --- /dev/null +++ b/docs/java-rest/high-level/snapshot/create_repository.asciidoc @@ -0,0 +1,139 @@ +[[java-rest-high-snapshot-create-repository]] +=== Snapshot Create Repository API + +The Snapshot Create Repository API allows you to register a snapshot repository.
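As a quick orientation, registering a shared file system repository end-to-end might look like this sketch (illustrative only; the repository name `my_repository` is an arbitrary example, and `client` is an existing `RestHighLevelClient`):

["source","java"]
--------------------------------------------------
PutRepositoryRequest request = new PutRepositoryRequest("my_repository");
request.type(FsRepository.TYPE); // a shared file system repository
request.settings("{\"location\": \".\"}", XContentType.JSON);

PutRepositoryResponse response = client.snapshot().createRepository(request);
boolean acknowledged = response.isAcknowledged();
--------------------------------------------------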
+ +[[java-rest-high-snapshot-create-repository-request]] +==== Snapshot Create Repository Request + +A `PutRepositoryRequest`: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-request] +-------------------------------------------------- + +==== Repository Settings +Settings requirements will differ based on the repository backend chosen. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-request-repository-settings] +-------------------------------------------------- +<1> Sets the repository settings + +==== Providing the Settings +The settings to be applied can be provided in different ways: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-create-settings] +-------------------------------------------------- +<1> Settings provided as `Settings` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-settings-builder] +-------------------------------------------------- +<1> Settings provided as `Settings.Builder` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-settings-source] +-------------------------------------------------- +<1> Settings provided as `String` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-settings-map] +-------------------------------------------------- +<1> Settings provided as a `Map` + +==== Required Arguments +The following arguments must be provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-request-name] +-------------------------------------------------- +<1> The name of the repository + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-request-type] +-------------------------------------------------- +<1> The type of the repository + +==== Optional Arguments +The following arguments can optionally be provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-request-timeout] +-------------------------------------------------- +<1> Timeout to wait for all the nodes to acknowledge the repository was created +as a `TimeValue` +<2> Timeout to wait for all the nodes to acknowledge the repository was created +as a `String` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-request-masterTimeout] +-------------------------------------------------- +<1> Timeout to connect to the master node as a 
`TimeValue` +<2> Timeout to connect to the master node as a `String` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-request-verify] +-------------------------------------------------- +<1> Whether to verify the repository after creation, as a `Boolean` + +[[java-rest-high-snapshot-create-repository-sync]] +==== Synchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-execute] +-------------------------------------------------- + +[[java-rest-high-snapshot-create-repository-async]] +==== Asynchronous Execution + +The asynchronous execution of a create repository request requires both the +`PutRepositoryRequest` instance and an `ActionListener` instance to be +passed to the asynchronous method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-execute-async] +-------------------------------------------------- +<1> The `PutRepositoryRequest` to execute and the `ActionListener` +to use when the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed, the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. + +A typical listener for `PutRepositoryResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of a failure. 
The raised exception is provided as an argument + +[[java-rest-high-snapshot-create-repository-response]] +==== Snapshot Create Repository Response + +The returned `PutRepositoryResponse` allows you to retrieve information about the +executed operation as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-response] +-------------------------------------------------- +<1> Indicates whether the node has acknowledged the request diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 2dee4643e73eb..b00047359a5d7 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -31,6 +31,7 @@ The Java High Level REST Client supports the following Search APIs: * <> * <> * <> +* <> * <> * <> * <> @@ -38,6 +39,7 @@ The Java High Level REST Client supports the following Search APIs: include::search/search.asciidoc[] include::search/scroll.asciidoc[] include::search/multi-search.asciidoc[] +include::search/search-template.asciidoc[] include::search/field-caps.asciidoc[] include::search/rank-eval.asciidoc[] @@ -102,8 +104,10 @@ include::indices/put_template.asciidoc[] The Java High Level REST Client supports the following Cluster APIs: * <> +* <> include::cluster/put_settings.asciidoc[] +include::cluster/list_tasks.asciidoc[] == Snapshot APIs @@ -111,4 +115,5 @@ The Java High Level REST Client supports the following Snapshot APIs: * <> -include::snapshot/get_repository.asciidoc[] \ No newline at end of file +include::snapshot/get_repository.asciidoc[] +include::snapshot/create_repository.asciidoc[] diff --git a/docs/reference/aggregations/pipeline.asciidoc b/docs/reference/aggregations/pipeline.asciidoc index bd1b0284a84fb..37c1c357007b0 100644 --- a/docs/reference/aggregations/pipeline.asciidoc +++ b/docs/reference/aggregations/pipeline.asciidoc @@ -72,6 +72,7 @@ POST /_search } -------------------------------------------------- // CONSOLE +// TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.] <1> The metric is called `"the_sum"` <2> The `buckets_path` refers to the metric via a relative path `"the_sum"` @@ -136,6 +137,7 @@ POST /_search } -------------------------------------------------- // CONSOLE +// TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.] 
<1> By using `_count` instead of a metric name, we can calculate the moving average of document counts in the histogram The `buckets_path` can also use `"_bucket_count"` and path to a multi-bucket aggregation to use the number of buckets @@ -231,6 +233,7 @@ include::pipeline/stats-bucket-aggregation.asciidoc[] include::pipeline/extended-stats-bucket-aggregation.asciidoc[] include::pipeline/percentiles-bucket-aggregation.asciidoc[] include::pipeline/movavg-aggregation.asciidoc[] +include::pipeline/movfn-aggregation.asciidoc[] include::pipeline/cumulative-sum-aggregation.asciidoc[] include::pipeline/bucket-script-aggregation.asciidoc[] include::pipeline/bucket-selector-aggregation.asciidoc[] diff --git a/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc b/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc index db73510216be0..39a8255c90705 100644 --- a/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc @@ -1,6 +1,10 @@ [[search-aggregations-pipeline-movavg-aggregation]] === Moving Average Aggregation +deprecated[6.4.0, The Moving Average aggregation has been deprecated in favor of the more general +<>. The new Moving Function aggregation provides +all the same functionality as the Moving Average aggregation, but also provides more flexibility.] + Given an ordered series of data, the Moving Average aggregation will slide a window across the data and emit the average value of that window. For example, given the data `[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]`, we can calculate a simple moving average with window size of `5` as follows: @@ -74,6 +78,7 @@ POST /_search -------------------------------------------------- // CONSOLE // TEST[setup:sales] +// TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.] <1> A `date_histogram` named "my_date_histo" is constructed on the "timestamp" field, with one-day intervals <2> A `sum` metric is used to calculate the sum of a field. This could be any metric (sum, min, max, etc) @@ -180,6 +185,7 @@ POST /_search -------------------------------------------------- // CONSOLE // TEST[setup:sales] +// TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.] A `simple` model has no special settings to configure @@ -233,6 +239,7 @@ POST /_search -------------------------------------------------- // CONSOLE // TEST[setup:sales] +// TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.] A `linear` model has no special settings to configure @@ -295,7 +302,7 @@ POST /_search -------------------------------------------------- // CONSOLE // TEST[setup:sales] - +// TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.] [[single_0.2alpha]] .EWMA with window of size 10, alpha = 0.2 @@ -355,6 +362,7 @@ POST /_search -------------------------------------------------- // CONSOLE // TEST[setup:sales] +// TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.] In practice, the `alpha` value behaves very similarly in `holt` as in `ewma`: small values produce more smoothing and more lag, while larger values produce closer tracking and less lag. 
The value of `beta` is often difficult @@ -446,7 +454,7 @@ POST /_search -------------------------------------------------- // CONSOLE // TEST[setup:sales] - +// TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.] [[holt_winters_add]] .Holt-Winters moving average with window of size 120, alpha = 0.5, beta = 0.7, gamma = 0.3, period = 30 @@ -508,6 +516,7 @@ POST /_search -------------------------------------------------- // CONSOLE // TEST[setup:sales] +// TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.] ==== Prediction @@ -550,6 +559,7 @@ POST /_search -------------------------------------------------- // CONSOLE // TEST[setup:sales] +// TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.] The `simple`, `linear` and `ewma` models all produce "flat" predictions: they essentially converge on the mean of the last value in the series, producing a flat: @@ -631,6 +641,7 @@ POST /_search -------------------------------------------------- // CONSOLE // TEST[setup:sales] +// TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.] <1> Minimization is enabled with the `minimize` parameter diff --git a/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc b/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc new file mode 100644 index 0000000000000..b05c56b880560 --- /dev/null +++ b/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc @@ -0,0 +1,633 @@ +[[search-aggregations-pipeline-movfn-aggregation]] +=== Moving Function Aggregation + +Given an ordered series of data, the Moving Function aggregation will slide a window across the data and allow the user to specify a custom +script that is executed on each window of data. For convenience, a number of common functions are predefined such as min/max, moving averages, +etc. + +This is conceptually very similar to the <> pipeline aggregation, except +it provides more functionality. + +==== Syntax + +A `moving_fn` aggregation looks like this in isolation: + +[source,js] +-------------------------------------------------- +{ + "moving_fn": { + "buckets_path": "the_sum", + "window": 10, + "script": "MovingFunctions.min(values)" + } +} +-------------------------------------------------- +// NOTCONSOLE + +.`moving_fn` Parameters +|=== +|Parameter Name |Description |Required |Default Value +|`buckets_path` |Path to the metric of interest (see <> for more details) |Required | +|`window` |The size of the window to "slide" across the histogram. |Required | +|`script` |The script that should be executed on each window of data |Required | +|=== + +`moving_fn` aggregations must be embedded inside of a `histogram` or `date_histogram` aggregation.
They can be +embedded like any other metric aggregation: + +[source,js] +-------------------------------------------------- +POST /_search +{ + "size": 0, + "aggs": { + "my_date_histo":{ <1> + "date_histogram":{ + "field":"date", + "interval":"1M" + }, + "aggs":{ + "the_sum":{ + "sum":{ "field": "price" } <2> + }, + "the_movfn": { + "moving_fn": { + "buckets_path": "the_sum", <3> + "window": 10, + "script": "MovingFunctions.unweightedAvg(values)" + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sales] + +<1> A `date_histogram` named "my_date_histo" is constructed on the "date" field, with one-month intervals +<2> A `sum` metric is used to calculate the sum of a field. This could be any numeric metric (sum, min, max, etc) +<3> Finally, we specify a `moving_fn` aggregation which uses "the_sum" metric as its input. + +Moving functions are built by first specifying a `histogram` or `date_histogram` over a field. You can then optionally +add numeric metrics, such as a `sum`, inside of that histogram. Finally, the `moving_fn` is embedded inside the histogram. +The `buckets_path` parameter is then used to "point" at one of the sibling metrics inside of the histogram (see +<> for a description of the syntax for `buckets_path`). + +An example response from the above aggregation may look like: + +[source,js] +-------------------------------------------------- +{ + "took": 11, + "timed_out": false, + "_shards": ..., + "hits": ..., + "aggregations": { + "my_date_histo": { + "buckets": [ + { + "key_as_string": "2015/01/01 00:00:00", + "key": 1420070400000, + "doc_count": 3, + "the_sum": { + "value": 550.0 + }, + "the_movfn": { + "value": null + } + }, + { + "key_as_string": "2015/02/01 00:00:00", + "key": 1422748800000, + "doc_count": 2, + "the_sum": { + "value": 60.0 + }, + "the_movfn": { + "value": 550.0 + } + }, + { + "key_as_string": "2015/03/01 00:00:00", + "key": 1425168000000, + "doc_count": 2, + "the_sum": { + "value": 375.0 + }, + "the_movfn": { + "value": 305.0 + } + } + ] + } + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"took": 11/"took": $body.took/] +// TESTRESPONSE[s/"_shards": \.\.\./"_shards": $body._shards/] +// TESTRESPONSE[s/"hits": \.\.\./"hits": $body.hits/] + + +==== Custom user scripting + +The Moving Function aggregation allows the user to specify any arbitrary script to define custom logic. The script is invoked each time a +new window of data is collected. These values are provided to the script in the `values` variable. The script should then perform some +kind of calculation and emit a single `double` as the result. Emitting `null` is not permitted, although `NaN` and +/- `Inf` are allowed. + +For example, this script will simply return the first value from the window, or `NaN` if no values are available: + +[source,js] +-------------------------------------------------- +POST /_search +{ + "size": 0, + "aggs": { + "my_date_histo":{ + "date_histogram":{ + "field":"date", + "interval":"1M" + }, + "aggs":{ + "the_sum":{ + "sum":{ "field": "price" } + }, + "the_movavg": { + "moving_fn": { + "buckets_path": "the_sum", + "window": 10, + "script": "return values.length > 0 ? 
values[0] : Double.NaN" + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sales] + +==== Pre-built Functions + +For convenience, a number of functions have been prebuilt and are available inside the `moving_fn` script context: + +- `max()` +- `min()` +- `sum()` +- `stdDev()` +- `unweightedAvg()` +- `linearWeightedAvg()` +- `ewma()` +- `holt()` +- `holtWinters()` + +The functions are available from the `MovingFunctions` namespace. E.g. `MovingFunctions.max()` + +===== max Function + +This function accepts a collection of doubles and returns the maximum value in that window. `null` and `NaN` values are ignored; the maximum +is only calculated over the real values. If the window is empty, or all values are `null`/`NaN`, `NaN` is returned as the result. + +.`max(double[] values)` Parameters +|=== +|Parameter Name |Description +|`values` |The window of values to find the maximum of +|=== + +[source,js] +-------------------------------------------------- +POST /_search +{ + "size": 0, + "aggs": { + "my_date_histo":{ + "date_histogram":{ + "field":"date", + "interval":"1M" + }, + "aggs":{ + "the_sum":{ + "sum":{ "field": "price" } + }, + "the_moving_max": { + "moving_fn": { + "buckets_path": "the_sum", + "window": 10, + "script": "MovingFunctions.max(values)" + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sales] + +===== min Function + +This function accepts a collection of doubles and returns the minimum value in that window. `null` and `NaN` values are ignored; the minimum +is only calculated over the real values. If the window is empty, or all values are `null`/`NaN`, `NaN` is returned as the result. + +.`min(double[] values)` Parameters +|=== +|Parameter Name |Description +|`values` |The window of values to find the minimum of +|=== + +[source,js] +-------------------------------------------------- +POST /_search +{ + "size": 0, + "aggs": { + "my_date_histo":{ + "date_histogram":{ + "field":"date", + "interval":"1M" + }, + "aggs":{ + "the_sum":{ + "sum":{ "field": "price" } + }, + "the_moving_min": { + "moving_fn": { + "buckets_path": "the_sum", + "window": 10, + "script": "MovingFunctions.min(values)" + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sales] + +===== sum Function + +This function accepts a collection of doubles and returns the sum of the values in that window. `null` and `NaN` values are ignored; +the sum is only calculated over the real values. If the window is empty, or all values are `null`/`NaN`, `0.0` is returned as the result. + +.`sum(double[] values)` Parameters +|=== +|Parameter Name |Description +|`values` |The window of values to find the sum of +|=== + +[source,js] +-------------------------------------------------- +POST /_search +{ + "size": 0, + "aggs": { + "my_date_histo":{ + "date_histogram":{ + "field":"date", + "interval":"1M" + }, + "aggs":{ + "the_sum":{ + "sum":{ "field": "price" } + }, + "the_moving_sum": { + "moving_fn": { + "buckets_path": "the_sum", + "window": 10, + "script": "MovingFunctions.sum(values)" + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sales] + +===== stdDev Function + +This function accepts a collection of doubles and an average, then returns the standard deviation of the values in that window. +`null` and `NaN` values are ignored; the standard deviation is only calculated over the real values.
If the window is empty, or all values are +`null`/`NaN`, `0.0` is returned as the result. + +.`stdDev(double[] values, double avg)` Parameters +|=== +|Parameter Name |Description +|`values` |The window of values to find the standard deviation of +|`avg` |The average of the window +|=== + +[source,js] +-------------------------------------------------- +POST /_search +{ + "size": 0, + "aggs": { + "my_date_histo":{ + "date_histogram":{ + "field":"date", + "interval":"1M" + }, + "aggs":{ + "the_sum":{ + "sum":{ "field": "price" } + }, + "the_moving_sum": { + "moving_fn": { + "buckets_path": "the_sum", + "window": 10, + "script": "MovingFunctions.stdDev(values, MovingFunctions.unweightedAvg(values))" + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sales] + +The `avg` parameter must be provided to the standard deviation function because different styles of averages can be computed on the window +(simple, linearly weighted, etc). The various moving averages that are detailed below can be used to calculate the average for the +standard deviation function. + +===== unweightedAvg Function + +The `unweightedAvg` function calculates the sum of all values in the window, then divides by the size of the window. It is effectively +a simple arithmetic mean of the window. The simple moving average does not perform any time-dependent weighting, which means +the values from a `simple` moving average tend to "lag" behind the real data. + +`null` and `NaN` values are ignored; the average is only calculated over the real values. If the window is empty, or all values are +`null`/`NaN`, `NaN` is returned as the result. This means that the count used in the average calculation is the count of non-`null`, non-`NaN` +values. + +.`unweightedAvg(double[] values)` Parameters +|=== +|Parameter Name |Description +|`values` |The window of values to find the average of +|=== + +[source,js] +-------------------------------------------------- +POST /_search +{ + "size": 0, + "aggs": { + "my_date_histo":{ + "date_histogram":{ + "field":"date", + "interval":"1M" + }, + "aggs":{ + "the_sum":{ + "sum":{ "field": "price" } + }, + "the_movavg": { + "moving_fn": { + "buckets_path": "the_sum", + "window": 10, + "script": "MovingFunctions.unweightedAvg(values)" + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sales] + +===== linearWeightedAvg Function + +The `linearWeightedAvg` function assigns a linear weighting to points in the series, such that "older" datapoints (e.g. those at +the beginning of the window) contribute linearly less to the total average. The linear weighting helps reduce +the "lag" behind the data's mean, since older points have less influence. + +If the window is empty, or all values are `null`/`NaN`, `NaN` is returned as the result.
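To make the weighting concrete, the following standalone Java sketch shows one way the linear scheme described above can be computed; it is an illustration of the idea, not the library's exact implementation:

["source","java"]
--------------------------------------------------
static double linearWeightedAvg(double[] values) {
    double weightedSum = 0;
    double totalWeight = 0;
    long weight = 1;
    for (double v : values) {
        if (Double.isNaN(v) == false) {
            weightedSum += v * weight;  // newer points receive larger weights
            totalWeight += weight;
            weight += 1;
        }
    }
    // an empty or all-NaN window yields NaN, as described above
    return totalWeight == 0 ? Double.NaN : weightedSum / totalWeight;
}
--------------------------------------------------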
+ +.`linearWeightedAvg(double[] values)` Parameters +|=== +|Parameter Name |Description +|`values` |The window of values to find the average of +|=== + +[source,js] +-------------------------------------------------- +POST /_search +{ + "size": 0, + "aggs": { + "my_date_histo":{ + "date_histogram":{ + "field":"date", + "interval":"1M" + }, + "aggs":{ + "the_sum":{ + "sum":{ "field": "price" } + }, + "the_movavg": { + "moving_fn": { + "buckets_path": "the_sum", + "window": 10, + "script": "MovingFunctions.linearWeightedAvg(values)" + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sales] + +===== ewma Function + +The `ewma` function (aka "single-exponential") is similar to the `linearWeightedAvg` function, +except older data-points become exponentially less important, +rather than linearly less important. The speed at which the importance decays can be controlled with an `alpha` +setting. Small values make the weight decay slowly, which provides greater smoothing and takes into account a larger +portion of the window. Larger values make the weight decay quickly, which reduces the impact of older values on the +moving average. This tends to make the moving average track the data more closely but with less smoothing. + +`null` and `NaN` values are ignored; the average is only calculated over the real values. If the window is empty, or all values are +`null`/`NaN`, `NaN` is returned as the result. This means that the count used in the average calculation is the count of non-`null`, non-`NaN` +values. + +.`ewma(double[] values, double alpha)` Parameters +|=== +|Parameter Name |Description +|`values` |The window of values to find the average of +|`alpha` |Exponential decay +|=== + +[source,js] +-------------------------------------------------- +POST /_search +{ + "size": 0, + "aggs": { + "my_date_histo":{ + "date_histogram":{ + "field":"date", + "interval":"1M" + }, + "aggs":{ + "the_sum":{ + "sum":{ "field": "price" } + }, + "the_movavg": { + "moving_fn": { + "buckets_path": "the_sum", + "window": 10, + "script": "MovingFunctions.ewma(values, 0.3)" + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sales] + + +===== holt Function + +The `holt` function (aka "double exponential") incorporates a second exponential term which +tracks the data's trend. Single exponential does not perform well when the data has an underlying linear trend. The +double exponential model calculates two values internally: a "level" and a "trend". + +The level calculation is similar to `ewma`, and is an exponentially weighted view of the data. The difference is +that the previously smoothed value is used instead of the raw value, which allows it to stay close to the original series. +The trend calculation looks at the difference between the current and last value (e.g. the slope, or trend, of the +smoothed data). The trend value is also exponentially weighted. + +Values are produced by multiplying the level and trend components. + +`null` and `NaN` values are ignored; the average is only calculated over the real values. If the window is empty, or all values are +`null`/`NaN`, `NaN` is returned as the result. This means that the count used in the average calculation is the count of non-`null`, non-`NaN` +values. 
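The textbook form of double exponential smoothing may help make the level/trend interaction concrete. The sketch below is an illustration of that standard formulation (ignoring `NaN` handling for brevity), not the library's exact code:

["source","java"]
--------------------------------------------------
static double holt(double[] values, double alpha, double beta) {
    if (values.length < 2) {
        return Double.NaN; // at least two points are needed to seed the trend
    }
    double level = values[0];
    double trend = values[1] - values[0]; // initial slope
    for (int i = 1; i < values.length; i++) {
        double lastLevel = level;
        level = alpha * values[i] + (1 - alpha) * (level + trend); // smoothed "level"
        trend = beta * (level - lastLevel) + (1 - beta) * trend;   // smoothed "trend"
    }
    return level + trend; // one-step-ahead forecast
}
--------------------------------------------------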
+ +.`holt(double[] values, double alpha, double beta)` Parameters +|=== +|Parameter Name |Description +|`values` |The window of values to find the average of +|`alpha` |Level decay value +|`beta` |Trend decay value +|=== + +[source,js] +-------------------------------------------------- +POST /_search +{ + "size": 0, + "aggs": { + "my_date_histo":{ + "date_histogram":{ + "field":"date", + "interval":"1M" + }, + "aggs":{ + "the_sum":{ + "sum":{ "field": "price" } + }, + "the_movavg": { + "moving_fn": { + "buckets_path": "the_sum", + "window": 10, + "script": "MovingFunctions.holt(values, 0.3, 0.1)" + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sales] + +In practice, the `alpha` value behaves very similarly in `holt` as in `ewma`: small values produce more smoothing +and more lag, while larger values produce closer tracking and less lag. The value of `beta` is often difficult +to see. Small values emphasize long-term trends (such as a constant linear trend in the whole series), while larger +values emphasize short-term trends. + +===== holtWinters Function + +The `holtWinters` function (aka "triple exponential") incorporates a third exponential term which +tracks the seasonal aspect of your data. This aggregation therefore smooths based on three components: "level", "trend" +and "seasonality". + +The level and trend calculation is identical to `holt`. The seasonal calculation looks at the difference between +the current point, and the point one period earlier. + +Holt-Winters requires a little more handholding than the other moving averages. You need to specify the "periodicity" +of your data: e.g. if your data has cyclic trends every 7 days, you would set `period = 7`. Similarly if there was +a monthly trend, you would set it to `30`. There is currently no periodicity detection, although that is planned +for future enhancements. + +`null` and `NaN` values are ignored; the average is only calculated over the real values. If the window is empty, or all values are +`null`/`NaN`, `NaN` is returned as the result. This means that the count used in the average calculation is the count of non-`null`, non-`NaN` +values. + +.`holtWinters(double[] values, double alpha, double beta, double gamma, int period, boolean multiplicative)` Parameters +|=== +|Parameter Name |Description +|`values` |The window of values to find the average of +|`alpha` |Level decay value +|`beta` |Trend decay value +|`gamma` |Seasonality decay value +|`period` |The periodicity of the data +|`multiplicative` |True if you wish to use multiplicative Holt-Winters, false to use additive +|=== + +[source,js] +-------------------------------------------------- +POST /_search +{ + "size": 0, + "aggs": { + "my_date_histo":{ + "date_histogram":{ + "field":"date", + "interval":"1M" + }, + "aggs":{ + "the_sum":{ + "sum":{ "field": "price" } + }, + "the_movavg": { + "moving_fn": { + "buckets_path": "the_sum", + "window": 10, + "script": "if (values.length > 5*2) {MovingFunctions.holtWinters(values, 0.3, 0.1, 0.1, 5, false)}" + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sales] + +[WARNING] +====== +Multiplicative Holt-Winters works by dividing each data point by the seasonal value. This is problematic if any of +your data is zero, or if there are gaps in the data (since this results in a divide-by-zero). To combat this, the +`mult` Holt-Winters pads all values by a very small amount (1*10^-10^) so that all values are non-zero. This affects +the result, but only minimally. 
If your data is non-zero, or you prefer to see `NaN` when zeros are encountered,
+you can disable this behavior with `pad: false`.
+======
+
+===== "Cold Start"
+
+Unfortunately, due to the nature of Holt-Winters, it requires two periods of data to "bootstrap" the algorithm. This
+means that your `window` must always be *at least* twice the size of your period. An exception will be thrown if it
+isn't. It also means that Holt-Winters will not emit a value for the first `2 * period` buckets; the current algorithm
+does not backcast.
+
+You'll notice in the above example we have an `if ()` statement checking the size of `values`. This makes sure we
+have two periods' worth of data (`5 * 2`, where 5 is the period specified in the `holtWinters` call) before invoking
+the holt-winters function.
diff --git a/docs/reference/modules/snapshots.asciidoc b/docs/reference/modules/snapshots.asciidoc
index 693d537d732c1..f70857e66c86f 100644
--- a/docs/reference/modules/snapshots.asciidoc
+++ b/docs/reference/modules/snapshots.asciidoc
@@ -289,6 +289,20 @@ By setting `include_global_state` to false it's possible to prevent the cluster
the snapshot. By default, the entire snapshot will fail if one or more indices participating in the snapshot don't have
all primary shards available. This behaviour can be changed by setting `partial` to `true`.
+Snapshot names can be automatically derived using <<date-math-index-names,date math expressions>>, similarly to when creating
+new indices. Note that special characters need to be URI encoded.
+
+For example, creating a snapshot with the current day in the name, like `snapshot-2018.05.11`, can be achieved with
+the following command:
+[source,js]
+-----------------------------------
+# PUT /_snapshot/my_backup/<snapshot-{now/d}>
+PUT /_snapshot/my_backup/%3Csnapshot-%7Bnow%2Fd%7D%3E
+-----------------------------------
+// CONSOLE
+// TEST[continued]
+
+
The index snapshot process is incremental. In the process of making the index snapshot Elasticsearch analyses
the list of the index files that are already stored in the repository and copies only files that were created or
changed since the last snapshot. That allows multiple snapshots to be preserved in the repository in a compact form.
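As an aside on the URI encoding above, the percent-encoded snapshot name can be produced mechanically rather than
by hand. The following is a small illustrative Java sketch (not part of this change); for this particular character
set, `java.net.URLEncoder`'s form encoding coincides with the percent-encoding the snapshot API expects:

[source,java]
-----------------------------------
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;

public class SnapshotNameEncoding {
    public static void main(String[] args) throws UnsupportedEncodingException {
        // Date math names contain <, >, { and }, none of which are legal in a URL
        // path, so the whole name must be URI encoded before it is used in the request.
        // (URLEncoder targets form encoding; that is fine here since the name has no spaces.)
        String name = "<snapshot-{now/d}>";
        System.out.println(URLEncoder.encode(name, "UTF-8"));
        // prints: %3Csnapshot-%7Bnow%2Fd%7D%3E
    }
}
-----------------------------------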
diff --git a/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java b/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java index a36df9987e7de..02bc304317e68 100644 --- a/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java +++ b/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java @@ -86,9 +86,13 @@ protected boolean randomizeContentType() { } @Override - protected ClientYamlTestClient initClientYamlTestClient(ClientYamlSuiteRestSpec restSpec, RestClient restClient, - List hosts, Version esVersion) throws IOException { - return new ClientYamlDocsTestClient(restSpec, restClient, hosts, esVersion); + protected ClientYamlTestClient initClientYamlTestClient( + final ClientYamlSuiteRestSpec restSpec, + final RestClient restClient, + final List hosts, + final Version esVersion, + final Version masterVersion) throws IOException { + return new ClientYamlDocsTestClient(restSpec, restClient, hosts, esVersion, masterVersion); } /** diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesChannelContext.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesChannelContext.java index 000e871e92781..ef1e188a22e0a 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesChannelContext.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesChannelContext.java @@ -20,25 +20,13 @@ package org.elasticsearch.nio; import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.channels.ClosedChannelException; -import java.util.LinkedList; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.BiConsumer; import java.util.function.Consumer; public class BytesChannelContext extends SocketChannelContext { - private final ReadConsumer readConsumer; - private final InboundChannelBuffer channelBuffer; - private final LinkedList queued = new LinkedList<>(); - private final AtomicBoolean isClosing = new AtomicBoolean(false); - public BytesChannelContext(NioSocketChannel channel, SocketSelector selector, Consumer exceptionHandler, - ReadConsumer readConsumer, InboundChannelBuffer channelBuffer) { - super(channel, selector, exceptionHandler); - this.readConsumer = readConsumer; - this.channelBuffer = channelBuffer; + ReadWriteHandler handler, InboundChannelBuffer channelBuffer) { + super(channel, selector, exceptionHandler, handler, channelBuffer); } @Override @@ -56,55 +44,30 @@ public int read() throws IOException { channelBuffer.incrementIndex(bytesRead); - int bytesConsumed = Integer.MAX_VALUE; - while (bytesConsumed > 0 && channelBuffer.getIndex() > 0) { - bytesConsumed = readConsumer.consumeReads(channelBuffer); - channelBuffer.release(bytesConsumed); - } + handleReadBytes(); return bytesRead; } - @Override - public void sendMessage(ByteBuffer[] buffers, BiConsumer listener) { - if (isClosing.get()) { - listener.accept(null, new ClosedChannelException()); - return; - } - - BytesWriteOperation writeOperation = new BytesWriteOperation(this, buffers, listener); - SocketSelector selector = getSelector(); - if (selector.isOnCurrentThread() == false) { - selector.queueWrite(writeOperation); - return; - } - - selector.queueWriteInChannelBuffer(writeOperation); - } - - @Override - public void queueWriteOperation(WriteOperation writeOperation) { - getSelector().assertOnSelectorThread(); - queued.add((BytesWriteOperation) writeOperation); - } - @Override public void flushChannel() throws IOException { 
getSelector().assertOnSelectorThread(); - int ops = queued.size(); - if (ops == 1) { - singleFlush(queued.pop()); - } else if (ops > 1) { - multiFlush(); + boolean lastOpCompleted = true; + FlushOperation flushOperation; + while (lastOpCompleted && (flushOperation = getPendingFlush()) != null) { + try { + if (singleFlush(flushOperation)) { + currentFlushOperationComplete(); + } else { + lastOpCompleted = false; + } + } catch (IOException e) { + currentFlushOperationFailed(e); + throw e; + } } } - @Override - public boolean hasQueuedWriteOps() { - getSelector().assertOnSelectorThread(); - return queued.isEmpty() == false; - } - @Override public void closeChannel() { if (isClosing.compareAndSet(false, true)) { @@ -117,51 +80,12 @@ public boolean selectorShouldClose() { return isPeerClosed() || hasIOException() || isClosing.get(); } - @Override - public void closeFromSelector() throws IOException { - getSelector().assertOnSelectorThread(); - if (channel.isOpen()) { - IOException channelCloseException = null; - try { - super.closeFromSelector(); - } catch (IOException e) { - channelCloseException = e; - } - // Set to true in order to reject new writes before queuing with selector - isClosing.set(true); - channelBuffer.close(); - for (BytesWriteOperation op : queued) { - getSelector().executeFailedListener(op.getListener(), new ClosedChannelException()); - } - queued.clear(); - if (channelCloseException != null) { - throw channelCloseException; - } - } - } - - private void singleFlush(BytesWriteOperation headOp) throws IOException { - try { - int written = flushToChannel(headOp.getBuffersToWrite()); - headOp.incrementIndex(written); - } catch (IOException e) { - getSelector().executeFailedListener(headOp.getListener(), e); - throw e; - } - - if (headOp.isFullyFlushed()) { - getSelector().executeListener(headOp.getListener(), null); - } else { - queued.push(headOp); - } - } - - private void multiFlush() throws IOException { - boolean lastOpCompleted = true; - while (lastOpCompleted && queued.isEmpty() == false) { - BytesWriteOperation op = queued.pop(); - singleFlush(op); - lastOpCompleted = op.isFullyFlushed(); - } + /** + * Returns a boolean indicating if the operation was fully flushed. + */ + private boolean singleFlush(FlushOperation flushOperation) throws IOException { + int written = flushToChannel(flushOperation.getBuffersToWrite()); + flushOperation.incrementIndex(written); + return flushOperation.isFullyFlushed(); } } diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteHandler.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteHandler.java new file mode 100644 index 0000000000000..ba379e2873210 --- /dev/null +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteHandler.java @@ -0,0 +1,47 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.nio; + +import java.nio.ByteBuffer; +import java.util.Collections; +import java.util.List; +import java.util.function.BiConsumer; + +public abstract class BytesWriteHandler implements ReadWriteHandler { + + private static final List EMPTY_LIST = Collections.emptyList(); + + public WriteOperation createWriteOperation(SocketChannelContext context, Object message, BiConsumer listener) { + assert message instanceof ByteBuffer[] : "This channel only supports messages that are of type: " + ByteBuffer[].class + + ". Found type: " + message.getClass() + "."; + return new FlushReadyWrite(context, (ByteBuffer[]) message, listener); + } + + public List writeToBytes(WriteOperation writeOperation) { + assert writeOperation instanceof FlushReadyWrite : "Write operation must be flush ready"; + return Collections.singletonList((FlushReadyWrite) writeOperation); + } + + public List pollFlushOperations() { + return EMPTY_LIST; + } + + public void close() {} +} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteOperation.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/FlushOperation.java similarity index 86% rename from libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteOperation.java rename to libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/FlushOperation.java index 37c6e49727634..3102c972a6795 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteOperation.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/FlushOperation.java @@ -23,17 +23,15 @@ import java.util.Arrays; import java.util.function.BiConsumer; -public class BytesWriteOperation implements WriteOperation { +public class FlushOperation { - private final SocketChannelContext channelContext; private final BiConsumer listener; private final ByteBuffer[] buffers; private final int[] offsets; private final int length; private int internalIndex; - public BytesWriteOperation(SocketChannelContext channelContext, ByteBuffer[] buffers, BiConsumer listener) { - this.channelContext = channelContext; + public FlushOperation(ByteBuffer[] buffers, BiConsumer listener) { this.listener = listener; this.buffers = buffers; this.offsets = new int[buffers.length]; @@ -46,16 +44,10 @@ public BytesWriteOperation(SocketChannelContext channelContext, ByteBuffer[] buf length = offset; } - @Override public BiConsumer getListener() { return listener; } - @Override - public SocketChannelContext getChannel() { - return channelContext; - } - public boolean isFullyFlushed() { assert length >= internalIndex : "Should never have an index that is greater than the length [length=" + length + ", index=" + internalIndex + "]"; @@ -84,5 +76,4 @@ public ByteBuffer[] getBuffersToWrite() { return postIndexBuffers; } - } diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/FlushReadyWrite.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/FlushReadyWrite.java new file mode 100644 index 0000000000000..65bc8f17aaf4b --- /dev/null +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/FlushReadyWrite.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.nio; + +import java.nio.ByteBuffer; +import java.util.function.BiConsumer; + +public class FlushReadyWrite extends FlushOperation implements WriteOperation { + + private final SocketChannelContext channelContext; + private final ByteBuffer[] buffers; + + FlushReadyWrite(SocketChannelContext channelContext, ByteBuffer[] buffers, BiConsumer listener) { + super(buffers, listener); + this.channelContext = channelContext; + this.buffers = buffers; + } + + @Override + public SocketChannelContext getChannel() { + return channelContext; + } + + @Override + public ByteBuffer[] getObject() { + return buffers; + } +} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ReadWriteHandler.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ReadWriteHandler.java new file mode 100644 index 0000000000000..f0637ea265280 --- /dev/null +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ReadWriteHandler.java @@ -0,0 +1,71 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.nio; + +import java.io.IOException; +import java.util.List; +import java.util.function.BiConsumer; + +/** + * Implements the application specific logic for handling inbound and outbound messages for a channel. + */ +public interface ReadWriteHandler { + + /** + * This method is called when a message is queued with a channel. It can be called from any thread. + * This method should validate that the message is a valid type and return a write operation object + * to be queued with the channel + * + * @param context the channel context + * @param message the message + * @param listener the listener to be called when the message is sent + * @return the write operation to be queued + */ + WriteOperation createWriteOperation(SocketChannelContext context, Object message, BiConsumer listener); + + /** + * This method is called on the event loop thread. It should serialize a write operation object to bytes + * that can be flushed to the raw nio channel. 
+ * + * @param writeOperation to be converted to bytes + * @return the operations to flush the bytes to the channel + */ + List writeToBytes(WriteOperation writeOperation); + + /** + * Returns any flush operations that are ready to flush. This exists as a way to check if any flush + * operations were produced during a read call. + * + * @return flush operations + */ + List pollFlushOperations(); + + /** + * This method handles bytes that have been read from the network. It should return the number of bytes + * consumed so that they can be released. + * + * @param channelBuffer of bytes read from the network + * @return the number of bytes consumed + * @throws IOException if an exception occurs + */ + int consumeReads(InboundChannelBuffer channelBuffer) throws IOException; + + void close() throws IOException; +} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java index 3bf47a98e0267..f2d299a9d328a 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java @@ -19,10 +19,16 @@ package org.elasticsearch.nio; +import org.elasticsearch.nio.utils.ExceptionsHelper; + import java.io.IOException; import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; import java.nio.channels.SocketChannel; +import java.util.ArrayList; +import java.util.LinkedList; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; import java.util.function.Consumer; @@ -33,21 +39,28 @@ * close behavior is required, it should be implemented in this context. * * The only methods of the context that should ever be called from a non-selector thread are - * {@link #closeChannel()} and {@link #sendMessage(ByteBuffer[], BiConsumer)}. + * {@link #closeChannel()} and {@link #sendMessage(Object, BiConsumer)}. 
*/ public abstract class SocketChannelContext extends ChannelContext { protected final NioSocketChannel channel; + protected final InboundChannelBuffer channelBuffer; + protected final AtomicBoolean isClosing = new AtomicBoolean(false); + private final ReadWriteHandler readWriteHandler; private final SocketSelector selector; private final CompletableFuture connectContext = new CompletableFuture<>(); + private final LinkedList pendingFlushes = new LinkedList<>(); private boolean ioException; private boolean peerClosed; private Exception connectException; - protected SocketChannelContext(NioSocketChannel channel, SocketSelector selector, Consumer exceptionHandler) { + protected SocketChannelContext(NioSocketChannel channel, SocketSelector selector, Consumer exceptionHandler, + ReadWriteHandler readWriteHandler, InboundChannelBuffer channelBuffer) { super(channel.getRawChannel(), exceptionHandler); this.selector = selector; this.channel = channel; + this.readWriteHandler = readWriteHandler; + this.channelBuffer = channelBuffer; } @Override @@ -108,15 +121,94 @@ public boolean connect() throws IOException { return isConnected; } - public abstract int read() throws IOException; + public void sendMessage(Object message, BiConsumer listener) { + if (isClosing.get()) { + listener.accept(null, new ClosedChannelException()); + return; + } - public abstract void sendMessage(ByteBuffer[] buffers, BiConsumer listener); + WriteOperation writeOperation = readWriteHandler.createWriteOperation(this, message, listener); + + SocketSelector selector = getSelector(); + if (selector.isOnCurrentThread() == false) { + selector.queueWrite(writeOperation); + return; + } + + selector.queueWriteInChannelBuffer(writeOperation); + } + + public void queueWriteOperation(WriteOperation writeOperation) { + getSelector().assertOnSelectorThread(); + pendingFlushes.addAll(readWriteHandler.writeToBytes(writeOperation)); + } - public abstract void queueWriteOperation(WriteOperation writeOperation); + public abstract int read() throws IOException; public abstract void flushChannel() throws IOException; - public abstract boolean hasQueuedWriteOps(); + protected void currentFlushOperationFailed(IOException e) { + FlushOperation flushOperation = pendingFlushes.pollFirst(); + getSelector().executeFailedListener(flushOperation.getListener(), e); + } + + protected void currentFlushOperationComplete() { + FlushOperation flushOperation = pendingFlushes.pollFirst(); + getSelector().executeListener(flushOperation.getListener(), null); + } + + protected FlushOperation getPendingFlush() { + return pendingFlushes.peekFirst(); + } + + @Override + public void closeFromSelector() throws IOException { + getSelector().assertOnSelectorThread(); + if (channel.isOpen()) { + ArrayList closingExceptions = new ArrayList<>(3); + try { + super.closeFromSelector(); + } catch (IOException e) { + closingExceptions.add(e); + } + // Set to true in order to reject new writes before queuing with selector + isClosing.set(true); + + // Poll for new flush operations to close + pendingFlushes.addAll(readWriteHandler.pollFlushOperations()); + FlushOperation flushOperation; + while ((flushOperation = pendingFlushes.pollFirst()) != null) { + selector.executeFailedListener(flushOperation.getListener(), new ClosedChannelException()); + } + + try { + readWriteHandler.close(); + } catch (IOException e) { + closingExceptions.add(e); + } + channelBuffer.close(); + + if (closingExceptions.isEmpty() == false) { + ExceptionsHelper.rethrowAndSuppress(closingExceptions); + } + } 
+ } + + protected void handleReadBytes() throws IOException { + int bytesConsumed = Integer.MAX_VALUE; + while (bytesConsumed > 0 && channelBuffer.getIndex() > 0) { + bytesConsumed = readWriteHandler.consumeReads(channelBuffer); + channelBuffer.release(bytesConsumed); + } + + // Some protocols might produce messages to flush during a read operation. + pendingFlushes.addAll(readWriteHandler.pollFlushOperations()); + } + + public boolean readyForFlush() { + getSelector().assertOnSelectorThread(); + return pendingFlushes.isEmpty() == false; + } /** * This method indicates if a selector should close this channel. @@ -178,9 +270,4 @@ protected int flushToChannel(ByteBuffer[] buffers) throws IOException { throw e; } } - - @FunctionalInterface - public interface ReadConsumer { - int consumeReads(InboundChannelBuffer channelBuffer) throws IOException; - } } diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketEventHandler.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketEventHandler.java index b1f738647619b..cacee47e96196 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketEventHandler.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketEventHandler.java @@ -48,7 +48,7 @@ protected void handleRegistration(SocketChannelContext context) throws IOExcepti context.register(); SelectionKey selectionKey = context.getSelectionKey(); selectionKey.attach(context); - if (context.hasQueuedWriteOps()) { + if (context.readyForFlush()) { SelectionKeyUtils.setConnectReadAndWriteInterested(selectionKey); } else { SelectionKeyUtils.setConnectAndReadInterested(selectionKey); @@ -150,7 +150,7 @@ protected void postHandling(SocketChannelContext context) { } else { SelectionKey selectionKey = context.getSelectionKey(); boolean currentlyWriteInterested = SelectionKeyUtils.isWriteInterested(selectionKey); - boolean pendingWrites = context.hasQueuedWriteOps(); + boolean pendingWrites = context.readyForFlush(); if (currentlyWriteInterested == false && pendingWrites) { SelectionKeyUtils.setWriteInterested(selectionKey); } else if (currentlyWriteInterested && pendingWrites == false) { diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteOperation.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteOperation.java index 665b9f7759e11..25de6ab7326f3 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteOperation.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteOperation.java @@ -16,7 +16,6 @@ * specific language governing permissions and limitations * under the License. */ - package org.elasticsearch.nio; import java.util.function.BiConsumer; @@ -24,11 +23,14 @@ /** * This is a basic write operation that can be queued with a channel. The only requirements of a write * operation is that is has a listener and a reference to its channel. The actual conversion of the write - * operation implementation to bytes will be performed by the {@link SocketChannelContext}. + * operation implementation to bytes will be performed by the {@link ReadWriteHandler}. 
*/ public interface WriteOperation { BiConsumer getListener(); SocketChannelContext getChannel(); + + Object getObject(); + } diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesChannelContextTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesChannelContextTests.java index d9de0ab1361c3..addfcdedbf99f 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesChannelContextTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesChannelContextTests.java @@ -19,23 +19,19 @@ package org.elasticsearch.nio; +import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.test.ESTestCase; import org.junit.Before; -import org.mockito.ArgumentCaptor; import java.io.IOException; import java.nio.ByteBuffer; -import java.nio.channels.ClosedChannelException; import java.nio.channels.SocketChannel; import java.util.function.BiConsumer; import java.util.function.Consumer; -import java.util.function.Supplier; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyInt; -import static org.mockito.Matchers.isNull; -import static org.mockito.Matchers.same; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -43,20 +39,19 @@ public class BytesChannelContextTests extends ESTestCase { - private SocketChannelContext.ReadConsumer readConsumer; + private CheckedFunction readConsumer; private NioSocketChannel channel; private SocketChannel rawChannel; private BytesChannelContext context; private InboundChannelBuffer channelBuffer; private SocketSelector selector; - private Consumer exceptionHandler; private BiConsumer listener; private int messageLength; @Before @SuppressWarnings("unchecked") public void init() { - readConsumer = mock(SocketChannelContext.ReadConsumer.class); + readConsumer = mock(CheckedFunction.class); messageLength = randomInt(96) + 20; selector = mock(SocketSelector.class); @@ -64,9 +59,9 @@ public void init() { channel = mock(NioSocketChannel.class); rawChannel = mock(SocketChannel.class); channelBuffer = InboundChannelBuffer.allocatingInstance(); - exceptionHandler = mock(Consumer.class); + TestReadWriteHandler handler = new TestReadWriteHandler(readConsumer); when(channel.getRawChannel()).thenReturn(rawChannel); - context = new BytesChannelContext(channel, selector, exceptionHandler, readConsumer, channelBuffer); + context = new BytesChannelContext(channel, selector, mock(Consumer.class), handler, channelBuffer); when(selector.isOnCurrentThread()).thenReturn(true); } @@ -80,13 +75,13 @@ public void testSuccessfulRead() throws IOException { return bytes.length; }); - when(readConsumer.consumeReads(channelBuffer)).thenReturn(messageLength, 0); + when(readConsumer.apply(channelBuffer)).thenReturn(messageLength, 0); assertEquals(messageLength, context.read()); assertEquals(0, channelBuffer.getIndex()); assertEquals(BigArrays.BYTE_PAGE_SIZE - bytes.length, channelBuffer.getCapacity()); - verify(readConsumer, times(1)).consumeReads(channelBuffer); + verify(readConsumer, times(1)).apply(channelBuffer); } public void testMultipleReadsConsumed() throws IOException { @@ -98,13 +93,13 @@ public void testMultipleReadsConsumed() throws IOException { return bytes.length; }); - when(readConsumer.consumeReads(channelBuffer)).thenReturn(messageLength, messageLength, 0); + when(readConsumer.apply(channelBuffer)).thenReturn(messageLength, messageLength, 0); 
assertEquals(bytes.length, context.read()); assertEquals(0, channelBuffer.getIndex()); assertEquals(BigArrays.BYTE_PAGE_SIZE - bytes.length, channelBuffer.getCapacity()); - verify(readConsumer, times(2)).consumeReads(channelBuffer); + verify(readConsumer, times(2)).apply(channelBuffer); } public void testPartialRead() throws IOException { @@ -117,20 +112,20 @@ public void testPartialRead() throws IOException { }); - when(readConsumer.consumeReads(channelBuffer)).thenReturn(0); + when(readConsumer.apply(channelBuffer)).thenReturn(0); assertEquals(messageLength, context.read()); assertEquals(bytes.length, channelBuffer.getIndex()); - verify(readConsumer, times(1)).consumeReads(channelBuffer); + verify(readConsumer, times(1)).apply(channelBuffer); - when(readConsumer.consumeReads(channelBuffer)).thenReturn(messageLength * 2, 0); + when(readConsumer.apply(channelBuffer)).thenReturn(messageLength * 2, 0); assertEquals(messageLength, context.read()); assertEquals(0, channelBuffer.getIndex()); assertEquals(BigArrays.BYTE_PAGE_SIZE - (bytes.length * 2), channelBuffer.getCapacity()); - verify(readConsumer, times(2)).consumeReads(channelBuffer); + verify(readConsumer, times(2)).apply(channelBuffer); } public void testReadThrowsIOException() throws IOException { @@ -157,186 +152,100 @@ public void testReadLessThanZeroMeansReadyForClose() throws IOException { assertTrue(context.selectorShouldClose()); } - @SuppressWarnings("unchecked") - public void testCloseClosesChannelBuffer() throws IOException { - try (SocketChannel realChannel = SocketChannel.open()) { - when(channel.getRawChannel()).thenReturn(realChannel); - context = new BytesChannelContext(channel, selector, exceptionHandler, readConsumer, channelBuffer); - - when(channel.isOpen()).thenReturn(true); - Runnable closer = mock(Runnable.class); - Supplier pageSupplier = () -> new InboundChannelBuffer.Page(ByteBuffer.allocate(1 << 14), closer); - InboundChannelBuffer buffer = new InboundChannelBuffer(pageSupplier); - buffer.ensureCapacity(1); - BytesChannelContext context = new BytesChannelContext(channel, selector, exceptionHandler, readConsumer, buffer); - context.closeFromSelector(); - verify(closer).run(); - } - } - - public void testWriteFailsIfClosing() { - context.closeChannel(); - - ByteBuffer[] buffers = {ByteBuffer.wrap(createMessage(10))}; - context.sendMessage(buffers, listener); - - verify(listener).accept(isNull(Void.class), any(ClosedChannelException.class)); - } - - public void testSendMessageFromDifferentThreadIsQueuedWithSelector() throws Exception { - ArgumentCaptor writeOpCaptor = ArgumentCaptor.forClass(BytesWriteOperation.class); - - when(selector.isOnCurrentThread()).thenReturn(false); - - ByteBuffer[] buffers = {ByteBuffer.wrap(createMessage(10))}; - context.sendMessage(buffers, listener); - - verify(selector).queueWrite(writeOpCaptor.capture()); - BytesWriteOperation writeOp = writeOpCaptor.getValue(); - - assertSame(listener, writeOp.getListener()); - assertSame(context, writeOp.getChannel()); - assertEquals(buffers[0], writeOp.getBuffersToWrite()[0]); - } - - public void testSendMessageFromSameThreadIsQueuedInChannel() { - ArgumentCaptor writeOpCaptor = ArgumentCaptor.forClass(BytesWriteOperation.class); - - ByteBuffer[] buffers = {ByteBuffer.wrap(createMessage(10))}; - context.sendMessage(buffers, listener); - - verify(selector).queueWriteInChannelBuffer(writeOpCaptor.capture()); - BytesWriteOperation writeOp = writeOpCaptor.getValue(); - - assertSame(listener, writeOp.getListener()); - assertSame(context, 
writeOp.getChannel()); - assertEquals(buffers[0], writeOp.getBuffersToWrite()[0]); - } - - public void testWriteIsQueuedInChannel() { - assertFalse(context.hasQueuedWriteOps()); - - ByteBuffer[] buffer = {ByteBuffer.allocate(10)}; - context.queueWriteOperation(new BytesWriteOperation(context, buffer, listener)); - - assertTrue(context.hasQueuedWriteOps()); - } - - @SuppressWarnings("unchecked") - public void testWriteOpsClearedOnClose() throws Exception { - try (SocketChannel realChannel = SocketChannel.open()) { - when(channel.getRawChannel()).thenReturn(realChannel); - context = new BytesChannelContext(channel, selector, exceptionHandler, readConsumer, channelBuffer); - - assertFalse(context.hasQueuedWriteOps()); - - ByteBuffer[] buffer = {ByteBuffer.allocate(10)}; - context.queueWriteOperation(new BytesWriteOperation(context, buffer, listener)); - - assertTrue(context.hasQueuedWriteOps()); - - when(channel.isOpen()).thenReturn(true); - context.closeFromSelector(); - - verify(selector).executeFailedListener(same(listener), any(ClosedChannelException.class)); - - assertFalse(context.hasQueuedWriteOps()); - } - } - + @SuppressWarnings("varargs") public void testQueuedWriteIsFlushedInFlushCall() throws Exception { - assertFalse(context.hasQueuedWriteOps()); + assertFalse(context.readyForFlush()); ByteBuffer[] buffers = {ByteBuffer.allocate(10)}; - BytesWriteOperation writeOperation = mock(BytesWriteOperation.class); - context.queueWriteOperation(writeOperation); - assertTrue(context.hasQueuedWriteOps()); + FlushReadyWrite flushOperation = mock(FlushReadyWrite.class); + context.queueWriteOperation(flushOperation); + + assertTrue(context.readyForFlush()); - when(writeOperation.getBuffersToWrite()).thenReturn(buffers); - when(writeOperation.isFullyFlushed()).thenReturn(true); - when(writeOperation.getListener()).thenReturn(listener); + when(flushOperation.getBuffersToWrite()).thenReturn(buffers); + when(flushOperation.isFullyFlushed()).thenReturn(true); + when(flushOperation.getListener()).thenReturn(listener); context.flushChannel(); verify(rawChannel).write(buffers, 0, buffers.length); verify(selector).executeListener(listener, null); - assertFalse(context.hasQueuedWriteOps()); + assertFalse(context.readyForFlush()); } public void testPartialFlush() throws IOException { - assertFalse(context.hasQueuedWriteOps()); - - BytesWriteOperation writeOperation = mock(BytesWriteOperation.class); - context.queueWriteOperation(writeOperation); + assertFalse(context.readyForFlush()); + FlushReadyWrite flushOperation = mock(FlushReadyWrite.class); + context.queueWriteOperation(flushOperation); + assertTrue(context.readyForFlush()); - assertTrue(context.hasQueuedWriteOps()); - - when(writeOperation.isFullyFlushed()).thenReturn(false); - when(writeOperation.getBuffersToWrite()).thenReturn(new ByteBuffer[0]); + when(flushOperation.isFullyFlushed()).thenReturn(false); + when(flushOperation.getBuffersToWrite()).thenReturn(new ByteBuffer[0]); context.flushChannel(); verify(listener, times(0)).accept(null, null); - assertTrue(context.hasQueuedWriteOps()); + assertTrue(context.readyForFlush()); } @SuppressWarnings("unchecked") public void testMultipleWritesPartialFlushes() throws IOException { - assertFalse(context.hasQueuedWriteOps()); + assertFalse(context.readyForFlush()); BiConsumer listener2 = mock(BiConsumer.class); - BytesWriteOperation writeOperation1 = mock(BytesWriteOperation.class); - BytesWriteOperation writeOperation2 = mock(BytesWriteOperation.class); - 
when(writeOperation1.getBuffersToWrite()).thenReturn(new ByteBuffer[0]); - when(writeOperation2.getBuffersToWrite()).thenReturn(new ByteBuffer[0]); - when(writeOperation1.getListener()).thenReturn(listener); - when(writeOperation2.getListener()).thenReturn(listener2); - context.queueWriteOperation(writeOperation1); - context.queueWriteOperation(writeOperation2); - - assertTrue(context.hasQueuedWriteOps()); - - when(writeOperation1.isFullyFlushed()).thenReturn(true); - when(writeOperation2.isFullyFlushed()).thenReturn(false); + FlushReadyWrite flushOperation1 = mock(FlushReadyWrite.class); + FlushReadyWrite flushOperation2 = mock(FlushReadyWrite.class); + when(flushOperation1.getBuffersToWrite()).thenReturn(new ByteBuffer[0]); + when(flushOperation2.getBuffersToWrite()).thenReturn(new ByteBuffer[0]); + when(flushOperation1.getListener()).thenReturn(listener); + when(flushOperation2.getListener()).thenReturn(listener2); + + context.queueWriteOperation(flushOperation1); + context.queueWriteOperation(flushOperation2); + + assertTrue(context.readyForFlush()); + + when(flushOperation1.isFullyFlushed()).thenReturn(true); + when(flushOperation2.isFullyFlushed()).thenReturn(false); context.flushChannel(); verify(selector).executeListener(listener, null); verify(listener2, times(0)).accept(null, null); - assertTrue(context.hasQueuedWriteOps()); + assertTrue(context.readyForFlush()); - when(writeOperation2.isFullyFlushed()).thenReturn(true); + when(flushOperation2.isFullyFlushed()).thenReturn(true); context.flushChannel(); verify(selector).executeListener(listener2, null); - assertFalse(context.hasQueuedWriteOps()); + assertFalse(context.readyForFlush()); } public void testWhenIOExceptionThrownListenerIsCalled() throws IOException { - assertFalse(context.hasQueuedWriteOps()); + assertFalse(context.readyForFlush()); ByteBuffer[] buffers = {ByteBuffer.allocate(10)}; - BytesWriteOperation writeOperation = mock(BytesWriteOperation.class); - context.queueWriteOperation(writeOperation); + FlushReadyWrite flushOperation = mock(FlushReadyWrite.class); + context.queueWriteOperation(flushOperation); - assertTrue(context.hasQueuedWriteOps()); + assertTrue(context.readyForFlush()); IOException exception = new IOException(); - when(writeOperation.getBuffersToWrite()).thenReturn(buffers); + when(flushOperation.getBuffersToWrite()).thenReturn(buffers); when(rawChannel.write(buffers, 0, buffers.length)).thenThrow(exception); - when(writeOperation.getListener()).thenReturn(listener); + when(flushOperation.getListener()).thenReturn(listener); expectThrows(IOException.class, () -> context.flushChannel()); verify(selector).executeFailedListener(listener, exception); - assertFalse(context.hasQueuedWriteOps()); + assertFalse(context.readyForFlush()); } public void testWriteIOExceptionMeansChannelReadyToClose() throws IOException { ByteBuffer[] buffers = {ByteBuffer.allocate(10)}; - BytesWriteOperation writeOperation = mock(BytesWriteOperation.class); - context.queueWriteOperation(writeOperation); + FlushReadyWrite flushOperation = mock(FlushReadyWrite.class); + context.queueWriteOperation(flushOperation); IOException exception = new IOException(); - when(writeOperation.getBuffersToWrite()).thenReturn(buffers); + when(flushOperation.getBuffersToWrite()).thenReturn(buffers); when(rawChannel.write(buffers, 0, buffers.length)).thenThrow(exception); assertFalse(context.selectorShouldClose()); @@ -344,7 +253,7 @@ public void testWriteIOExceptionMeansChannelReadyToClose() throws IOException { 
assertTrue(context.selectorShouldClose()); } - public void initiateCloseSchedulesCloseWithSelector() { + public void testInitiateCloseSchedulesCloseWithSelector() { context.closeChannel(); verify(selector).queueChannelClose(channel); } @@ -356,4 +265,18 @@ private static byte[] createMessage(int length) { } return bytes; } + + private static class TestReadWriteHandler extends BytesWriteHandler { + + private final CheckedFunction fn; + + private TestReadWriteHandler(CheckedFunction fn) { + this.fn = fn; + } + + @Override + public int consumeReads(InboundChannelBuffer channelBuffer) throws IOException { + return fn.apply(channelBuffer); + } + } } diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesWriteOperationTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/FlushOperationTests.java similarity index 87% rename from libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesWriteOperationTests.java rename to libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/FlushOperationTests.java index 05afc80a49086..a244de51f3591 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesWriteOperationTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/FlushOperationTests.java @@ -29,22 +29,19 @@ import static org.mockito.Mockito.mock; -public class BytesWriteOperationTests extends ESTestCase { +public class FlushOperationTests extends ESTestCase { - private SocketChannelContext channelContext; private BiConsumer listener; @Before @SuppressWarnings("unchecked") public void setFields() { - channelContext = mock(SocketChannelContext.class); listener = mock(BiConsumer.class); - } public void testFullyFlushedMarker() { ByteBuffer[] buffers = {ByteBuffer.allocate(10)}; - BytesWriteOperation writeOp = new BytesWriteOperation(channelContext, buffers, listener); + FlushOperation writeOp = new FlushOperation(buffers, listener); writeOp.incrementIndex(10); @@ -53,7 +50,7 @@ public void testFullyFlushedMarker() { public void testPartiallyFlushedMarker() { ByteBuffer[] buffers = {ByteBuffer.allocate(10)}; - BytesWriteOperation writeOp = new BytesWriteOperation(channelContext, buffers, listener); + FlushOperation writeOp = new FlushOperation(buffers, listener); writeOp.incrementIndex(5); @@ -62,7 +59,7 @@ public void testPartiallyFlushedMarker() { public void testMultipleFlushesWithCompositeBuffer() throws IOException { ByteBuffer[] buffers = {ByteBuffer.allocate(10), ByteBuffer.allocate(15), ByteBuffer.allocate(3)}; - BytesWriteOperation writeOp = new BytesWriteOperation(channelContext, buffers, listener); + FlushOperation writeOp = new FlushOperation(buffers, listener); ArgumentCaptor buffersCaptor = ArgumentCaptor.forClass(ByteBuffer[].class); diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketChannelContextTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketChannelContextTests.java index 17e6b7acba283..d6787f7cc1534 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketChannelContextTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketChannelContextTests.java @@ -21,18 +21,27 @@ import org.elasticsearch.test.ESTestCase; import org.junit.Before; +import org.mockito.ArgumentCaptor; import java.io.IOException; import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; import java.nio.channels.SocketChannel; +import java.util.Arrays; +import java.util.Collections; import 
java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; import java.util.function.Consumer; +import java.util.function.Supplier; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyInt; +import static org.mockito.Matchers.isNull; +import static org.mockito.Matchers.same; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class SocketChannelContextTests extends ESTestCase { @@ -41,6 +50,9 @@ public class SocketChannelContextTests extends ESTestCase { private TestSocketChannelContext context; private Consumer exceptionHandler; private NioSocketChannel channel; + private BiConsumer listener; + private SocketSelector selector; + private ReadWriteHandler readWriteHandler; @SuppressWarnings("unchecked") @Before @@ -49,9 +61,15 @@ public void setup() throws Exception { rawChannel = mock(SocketChannel.class); channel = mock(NioSocketChannel.class); + listener = mock(BiConsumer.class); when(channel.getRawChannel()).thenReturn(rawChannel); exceptionHandler = mock(Consumer.class); - context = new TestSocketChannelContext(channel, mock(SocketSelector.class), exceptionHandler); + selector = mock(SocketSelector.class); + readWriteHandler = mock(ReadWriteHandler.class); + InboundChannelBuffer channelBuffer = InboundChannelBuffer.allocatingInstance(); + context = new TestSocketChannelContext(channel, selector, exceptionHandler, readWriteHandler, channelBuffer); + + when(selector.isOnCurrentThread()).thenReturn(true); } public void testIOExceptionSetIfEncountered() throws IOException { @@ -119,10 +137,147 @@ public void testConnectFails() throws IOException { assertSame(ioException, exception.get()); } + public void testWriteFailsIfClosing() { + context.closeChannel(); + + ByteBuffer[] buffers = {ByteBuffer.wrap(createMessage(10))}; + context.sendMessage(buffers, listener); + + verify(listener).accept(isNull(Void.class), any(ClosedChannelException.class)); + } + + public void testSendMessageFromDifferentThreadIsQueuedWithSelector() throws Exception { + ArgumentCaptor writeOpCaptor = ArgumentCaptor.forClass(WriteOperation.class); + + when(selector.isOnCurrentThread()).thenReturn(false); + + ByteBuffer[] buffers = {ByteBuffer.wrap(createMessage(10))}; + WriteOperation writeOperation = mock(WriteOperation.class); + when(readWriteHandler.createWriteOperation(context, buffers, listener)).thenReturn(writeOperation); + context.sendMessage(buffers, listener); + + verify(selector).queueWrite(writeOpCaptor.capture()); + WriteOperation writeOp = writeOpCaptor.getValue(); + + assertSame(writeOperation, writeOp); + } + + public void testSendMessageFromSameThreadIsQueuedInChannel() { + ArgumentCaptor writeOpCaptor = ArgumentCaptor.forClass(WriteOperation.class); + + ByteBuffer[] buffers = {ByteBuffer.wrap(createMessage(10))}; + WriteOperation writeOperation = mock(WriteOperation.class); + when(readWriteHandler.createWriteOperation(context, buffers, listener)).thenReturn(writeOperation); + context.sendMessage(buffers, listener); + + verify(selector).queueWriteInChannelBuffer(writeOpCaptor.capture()); + WriteOperation writeOp = writeOpCaptor.getValue(); + + assertSame(writeOperation, writeOp); + } + + public void testWriteIsQueuedInChannel() { + assertFalse(context.readyForFlush()); + + ByteBuffer[] buffer = {ByteBuffer.allocate(10)}; + FlushReadyWrite writeOperation = new FlushReadyWrite(context, 
buffer, listener); + when(readWriteHandler.writeToBytes(writeOperation)).thenReturn(Collections.singletonList(writeOperation)); + context.queueWriteOperation(writeOperation); + + verify(readWriteHandler).writeToBytes(writeOperation); + assertTrue(context.readyForFlush()); + } + + public void testHandleReadBytesWillCheckForNewFlushOperations() throws IOException { + assertFalse(context.readyForFlush()); + when(readWriteHandler.pollFlushOperations()).thenReturn(Collections.singletonList(mock(FlushOperation.class))); + context.handleReadBytes(); + assertTrue(context.readyForFlush()); + } + + @SuppressWarnings({"unchecked", "varargs"}) + public void testFlushOpsClearedOnClose() throws Exception { + try (SocketChannel realChannel = SocketChannel.open()) { + when(channel.getRawChannel()).thenReturn(realChannel); + InboundChannelBuffer channelBuffer = InboundChannelBuffer.allocatingInstance(); + context = new TestSocketChannelContext(channel, selector, exceptionHandler, readWriteHandler, channelBuffer); + + assertFalse(context.readyForFlush()); + + ByteBuffer[] buffer = {ByteBuffer.allocate(10)}; + WriteOperation writeOperation = mock(WriteOperation.class); + BiConsumer listener2 = mock(BiConsumer.class); + when(readWriteHandler.writeToBytes(writeOperation)).thenReturn(Arrays.asList(new FlushOperation(buffer, listener), + new FlushOperation(buffer, listener2))); + context.queueWriteOperation(writeOperation); + + assertTrue(context.readyForFlush()); + + when(channel.isOpen()).thenReturn(true); + context.closeFromSelector(); + + verify(selector, times(1)).executeFailedListener(same(listener), any(ClosedChannelException.class)); + verify(selector, times(1)).executeFailedListener(same(listener2), any(ClosedChannelException.class)); + + assertFalse(context.readyForFlush()); + } + } + + @SuppressWarnings({"unchecked", "varargs"}) + public void testWillPollForFlushOpsToClose() throws Exception { + try (SocketChannel realChannel = SocketChannel.open()) { + when(channel.getRawChannel()).thenReturn(realChannel); + InboundChannelBuffer channelBuffer = InboundChannelBuffer.allocatingInstance(); + context = new TestSocketChannelContext(channel, selector, exceptionHandler, readWriteHandler, channelBuffer); + + + ByteBuffer[] buffer = {ByteBuffer.allocate(10)}; + BiConsumer listener2 = mock(BiConsumer.class); + + assertFalse(context.readyForFlush()); + when(channel.isOpen()).thenReturn(true); + when(readWriteHandler.pollFlushOperations()).thenReturn(Arrays.asList(new FlushOperation(buffer, listener), + new FlushOperation(buffer, listener2))); + context.closeFromSelector(); + + verify(selector, times(1)).executeFailedListener(same(listener), any(ClosedChannelException.class)); + verify(selector, times(1)).executeFailedListener(same(listener2), any(ClosedChannelException.class)); + + assertFalse(context.readyForFlush()); + } + } + + public void testCloseClosesWriteProducer() throws IOException { + try (SocketChannel realChannel = SocketChannel.open()) { + when(channel.getRawChannel()).thenReturn(realChannel); + when(channel.isOpen()).thenReturn(true); + InboundChannelBuffer buffer = InboundChannelBuffer.allocatingInstance(); + BytesChannelContext context = new BytesChannelContext(channel, selector, exceptionHandler, readWriteHandler, buffer); + context.closeFromSelector(); + verify(readWriteHandler).close(); + } + } + + @SuppressWarnings("unchecked") + public void testCloseClosesChannelBuffer() throws IOException { + try (SocketChannel realChannel = SocketChannel.open()) { + 
when(channel.getRawChannel()).thenReturn(realChannel); + when(channel.isOpen()).thenReturn(true); + Runnable closer = mock(Runnable.class); + Supplier pageSupplier = () -> new InboundChannelBuffer.Page(ByteBuffer.allocate(1 << 14), closer); + InboundChannelBuffer buffer = new InboundChannelBuffer(pageSupplier); + buffer.ensureCapacity(1); + TestSocketChannelContext context = new TestSocketChannelContext(channel, selector, exceptionHandler, readWriteHandler, buffer); + context.closeFromSelector(); + verify(closer).run(); + } + } + private static class TestSocketChannelContext extends SocketChannelContext { - private TestSocketChannelContext(NioSocketChannel channel, SocketSelector selector, Consumer exceptionHandler) { - super(channel, selector, exceptionHandler); + private TestSocketChannelContext(NioSocketChannel channel, SocketSelector selector, Consumer exceptionHandler, + ReadWriteHandler readWriteHandler, InboundChannelBuffer channelBuffer) { + super(channel, selector, exceptionHandler, readWriteHandler, channelBuffer); } @Override @@ -135,16 +290,6 @@ public int read() throws IOException { } } - @Override - public void sendMessage(ByteBuffer[] buffers, BiConsumer listener) { - - } - - @Override - public void queueWriteOperation(WriteOperation writeOperation) { - - } - @Override public void flushChannel() throws IOException { if (randomBoolean()) { @@ -155,11 +300,6 @@ public void flushChannel() throws IOException { } } - @Override - public boolean hasQueuedWriteOps() { - return false; - } - @Override public boolean selectorShouldClose() { return false; @@ -167,7 +307,15 @@ public boolean selectorShouldClose() { @Override public void closeChannel() { + isClosing.set(true); + } + } + private static byte[] createMessage(int length) { + byte[] bytes = new byte[length]; + for (int i = 0; i < length; ++i) { + bytes[i] = randomByte(); } + return bytes; } } diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketEventHandlerTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketEventHandlerTests.java index 4f476c1ff6b22..a80563f7d74db 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketEventHandlerTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketEventHandlerTests.java @@ -26,6 +26,7 @@ import java.nio.channels.CancelledKeyException; import java.nio.channels.SelectionKey; import java.nio.channels.SocketChannel; +import java.util.Collections; import java.util.function.Consumer; import static org.mockito.Mockito.mock; @@ -37,6 +38,7 @@ public class SocketEventHandlerTests extends ESTestCase { private Consumer exceptionHandler; + private ReadWriteHandler readWriteHandler; private SocketEventHandler handler; private NioSocketChannel channel; private SocketChannel rawChannel; @@ -46,13 +48,14 @@ public class SocketEventHandlerTests extends ESTestCase { @SuppressWarnings("unchecked") public void setUpHandler() throws IOException { exceptionHandler = mock(Consumer.class); + readWriteHandler = mock(ReadWriteHandler.class); SocketSelector selector = mock(SocketSelector.class); handler = new SocketEventHandler(logger); rawChannel = mock(SocketChannel.class); channel = new NioSocketChannel(rawChannel); when(rawChannel.finishConnect()).thenReturn(true); - context = new DoNotRegisterContext(channel, selector, exceptionHandler, new TestSelectionKey(0)); + context = new DoNotRegisterContext(channel, selector, exceptionHandler, new TestSelectionKey(0), readWriteHandler); channel.setContext(context); 
handler.handleRegistration(context); @@ -83,7 +86,9 @@ public void testRegisterAddsAttachment() throws IOException { } public void testRegisterWithPendingWritesAddsOP_CONNECTAndOP_READAndOP_WRITEInterest() throws IOException { - channel.getContext().queueWriteOperation(mock(BytesWriteOperation.class)); + FlushReadyWrite flushReadyWrite = mock(FlushReadyWrite.class); + when(readWriteHandler.writeToBytes(flushReadyWrite)).thenReturn(Collections.singletonList(flushReadyWrite)); + channel.getContext().queueWriteOperation(flushReadyWrite); handler.handleRegistration(context); assertEquals(SelectionKey.OP_READ | SelectionKey.OP_CONNECT | SelectionKey.OP_WRITE, context.getSelectionKey().interestOps()); } @@ -162,7 +167,7 @@ public void testPostHandlingWillAddWriteIfNecessary() throws IOException { TestSelectionKey selectionKey = new TestSelectionKey(SelectionKey.OP_READ); SocketChannelContext context = mock(SocketChannelContext.class); when(context.getSelectionKey()).thenReturn(selectionKey); - when(context.hasQueuedWriteOps()).thenReturn(true); + when(context.readyForFlush()).thenReturn(true); NioSocketChannel channel = mock(NioSocketChannel.class); when(channel.getContext()).thenReturn(context); @@ -176,7 +181,7 @@ public void testPostHandlingWillRemoveWriteIfNecessary() throws IOException { TestSelectionKey key = new TestSelectionKey(SelectionKey.OP_READ | SelectionKey.OP_WRITE); SocketChannelContext context = mock(SocketChannelContext.class); when(context.getSelectionKey()).thenReturn(key); - when(context.hasQueuedWriteOps()).thenReturn(false); + when(context.readyForFlush()).thenReturn(false); NioSocketChannel channel = mock(NioSocketChannel.class); when(channel.getContext()).thenReturn(context); @@ -192,8 +197,8 @@ private class DoNotRegisterContext extends BytesChannelContext { private final TestSelectionKey selectionKey; DoNotRegisterContext(NioSocketChannel channel, SocketSelector selector, Consumer exceptionHandler, - TestSelectionKey selectionKey) { - super(channel, selector, exceptionHandler, mock(ReadConsumer.class), InboundChannelBuffer.allocatingInstance()); + TestSelectionKey selectionKey, ReadWriteHandler handler) { + super(channel, selector, exceptionHandler, handler, InboundChannelBuffer.allocatingInstance()); this.selectionKey = selectionKey; } diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketSelectorTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketSelectorTests.java index 223f14455f96d..a68f5c05dad5a 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketSelectorTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketSelectorTests.java @@ -117,13 +117,13 @@ public void testSuccessfullyRegisterChannelWillAttemptConnect() throws Exception public void testQueueWriteWhenNotRunning() throws Exception { socketSelector.close(); - socketSelector.queueWrite(new BytesWriteOperation(channelContext, buffers, listener)); + socketSelector.queueWrite(new FlushReadyWrite(channelContext, buffers, listener)); verify(listener).accept(isNull(Void.class), any(ClosedSelectorException.class)); } public void testQueueWriteChannelIsClosed() throws Exception { - BytesWriteOperation writeOperation = new BytesWriteOperation(channelContext, buffers, listener); + WriteOperation writeOperation = new FlushReadyWrite(channelContext, buffers, listener); socketSelector.queueWrite(writeOperation); when(channelContext.isOpen()).thenReturn(false); @@ -136,7 +136,7 @@ public void 
testQueueWriteChannelIsClosed() throws Exception { public void testQueueWriteSelectionKeyThrowsException() throws Exception { SelectionKey selectionKey = mock(SelectionKey.class); - BytesWriteOperation writeOperation = new BytesWriteOperation(channelContext, buffers, listener); + WriteOperation writeOperation = new FlushReadyWrite(channelContext, buffers, listener); CancelledKeyException cancelledKeyException = new CancelledKeyException(); socketSelector.queueWrite(writeOperation); @@ -149,7 +149,7 @@ public void testQueueWriteSelectionKeyThrowsException() throws Exception { } public void testQueueWriteSuccessful() throws Exception { - BytesWriteOperation writeOperation = new BytesWriteOperation(channelContext, buffers, listener); + WriteOperation writeOperation = new FlushReadyWrite(channelContext, buffers, listener); socketSelector.queueWrite(writeOperation); assertTrue((selectionKey.interestOps() & SelectionKey.OP_WRITE) == 0); @@ -161,7 +161,7 @@ public void testQueueWriteSuccessful() throws Exception { } public void testQueueDirectlyInChannelBufferSuccessful() throws Exception { - BytesWriteOperation writeOperation = new BytesWriteOperation(channelContext, buffers, listener); + WriteOperation writeOperation = new FlushReadyWrite(channelContext, buffers, listener); assertTrue((selectionKey.interestOps() & SelectionKey.OP_WRITE) == 0); @@ -174,7 +174,7 @@ public void testQueueDirectlyInChannelBufferSuccessful() throws Exception { public void testQueueDirectlyInChannelBufferSelectionKeyThrowsException() throws Exception { SelectionKey selectionKey = mock(SelectionKey.class); - BytesWriteOperation writeOperation = new BytesWriteOperation(channelContext, buffers, listener); + WriteOperation writeOperation = new FlushReadyWrite(channelContext, buffers, listener); CancelledKeyException cancelledKeyException = new CancelledKeyException(); when(channelContext.getSelectionKey()).thenReturn(selectionKey); @@ -277,7 +277,7 @@ public void testCleanup() throws Exception { socketSelector.preSelect(); - socketSelector.queueWrite(new BytesWriteOperation(channelContext, buffers, listener)); + socketSelector.queueWrite(new FlushReadyWrite(channelContext, buffers, listener)); socketSelector.scheduleForRegistration(unregisteredChannel); TestSelectionKey testSelectionKey = new TestSelectionKey(0); diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateAction.java index fd797c4340a8f..9969e6b38e54a 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateAction.java @@ -77,7 +77,7 @@ public static MultiSearchTemplateRequest parseRequest(RestRequest restRequest, b RestMultiSearchAction.parseMultiLineRequest(restRequest, multiRequest.indicesOptions(), allowExplicitIndex, (searchRequest, bytes) -> { - SearchTemplateRequest searchTemplateRequest = RestSearchTemplateAction.parse(bytes); + SearchTemplateRequest searchTemplateRequest = SearchTemplateRequest.fromXContent(bytes); if (searchTemplateRequest.getScript() != null) { searchTemplateRequest.setRequest(searchRequest); multiRequest.add(searchTemplateRequest); diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestRenderSearchTemplateAction.java 
b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestRenderSearchTemplateAction.java index d8c67839cb80f..75acc09424359 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestRenderSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestRenderSearchTemplateAction.java @@ -52,7 +52,7 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client // Creates the render template request SearchTemplateRequest renderRequest; try (XContentParser parser = request.contentOrSourceParamParser()) { - renderRequest = RestSearchTemplateAction.parse(parser); + renderRequest = SearchTemplateRequest.fromXContent(parser); } renderRequest.setSimulate(true); diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java index 7ab9aa6003334..f42afcc19b80f 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java @@ -47,33 +47,6 @@ public class RestSearchTemplateAction extends BaseRestHandler { private static final Set RESPONSE_PARAMS = Collections.singleton(RestSearchAction.TYPED_KEYS_PARAM); - private static final ObjectParser PARSER; - static { - PARSER = new ObjectParser<>("search_template"); - PARSER.declareField((parser, request, s) -> - request.setScriptParams(parser.map()) - , new ParseField("params"), ObjectParser.ValueType.OBJECT); - PARSER.declareString((request, s) -> { - request.setScriptType(ScriptType.STORED); - request.setScript(s); - }, new ParseField("id")); - PARSER.declareBoolean(SearchTemplateRequest::setExplain, new ParseField("explain")); - PARSER.declareBoolean(SearchTemplateRequest::setProfile, new ParseField("profile")); - PARSER.declareField((parser, request, value) -> { - request.setScriptType(ScriptType.INLINE); - if (parser.currentToken() == XContentParser.Token.START_OBJECT) { - //convert the template to json which is the only supported XContentType (see CustomMustacheFactory#createEncoder) - try (XContentBuilder builder = XContentFactory.jsonBuilder()) { - request.setScript(Strings.toString(builder.copyCurrentStructure(parser))); - } catch (IOException e) { - throw new ParsingException(parser.getTokenLocation(), "Could not parse inline template", e); - } - } else { - request.setScript(parser.text()); - } - }, new ParseField("source", "inline", "template"), ObjectParser.ValueType.OBJECT_OR_STRING); - } - public RestSearchTemplateAction(Settings settings, RestController controller) { super(settings); @@ -99,17 +72,13 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client // Creates the search template request SearchTemplateRequest searchTemplateRequest; try (XContentParser parser = request.contentOrSourceParamParser()) { - searchTemplateRequest = PARSER.parse(parser, new SearchTemplateRequest(), null); + searchTemplateRequest = SearchTemplateRequest.fromXContent(parser); } searchTemplateRequest.setRequest(searchRequest); return channel -> client.execute(SearchTemplateAction.INSTANCE, searchTemplateRequest, new RestStatusToXContentListener<>(channel)); } - public static SearchTemplateRequest parse(XContentParser parser) throws IOException { - return PARSER.parse(parser, new SearchTemplateRequest(), null); - } - @Override 
protected Set responseParams() { return RESPONSE_PARAMS; diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequest.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequest.java index b0186b7b0e3cf..da3cc3688149c 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequest.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequest.java @@ -23,19 +23,28 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.CompositeIndicesRequest; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.script.ScriptType; import java.io.IOException; import java.util.Map; +import java.util.Objects; import static org.elasticsearch.action.ValidateActions.addValidationError; /** * A request to execute a search based on a search template. */ -public class SearchTemplateRequest extends ActionRequest implements CompositeIndicesRequest { +public class SearchTemplateRequest extends ActionRequest implements CompositeIndicesRequest, ToXContentObject { private SearchRequest request; private boolean simulate = false; @@ -60,6 +69,24 @@ public SearchRequest getRequest() { return request; } + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SearchTemplateRequest request1 = (SearchTemplateRequest) o; + return simulate == request1.simulate && + explain == request1.explain && + profile == request1.profile && + Objects.equals(request, request1.request) && + scriptType == request1.scriptType && + Objects.equals(script, request1.script) && + Objects.equals(scriptParams, request1.scriptParams); + } + + @Override + public int hashCode() { + return Objects.hash(request, simulate, explain, profile, scriptType, script, scriptParams); + } public boolean isSimulate() { return simulate; @@ -134,6 +161,62 @@ public ActionRequestValidationException validate() { return validationException; } + private static ParseField ID_FIELD = new ParseField("id"); + private static ParseField SOURCE_FIELD = new ParseField("source", "inline", "template"); + + private static ParseField PARAMS_FIELD = new ParseField("params"); + private static ParseField EXPLAIN_FIELD = new ParseField("explain"); + private static ParseField PROFILE_FIELD = new ParseField("profile"); + + private static final ObjectParser PARSER; + static { + PARSER = new ObjectParser<>("search_template"); + PARSER.declareField((parser, request, s) -> + request.setScriptParams(parser.map()) + , PARAMS_FIELD, ObjectParser.ValueType.OBJECT); + PARSER.declareString((request, s) -> { + request.setScriptType(ScriptType.STORED); + request.setScript(s); + }, ID_FIELD); + PARSER.declareBoolean(SearchTemplateRequest::setExplain, EXPLAIN_FIELD); + PARSER.declareBoolean(SearchTemplateRequest::setProfile, PROFILE_FIELD); + 
PARSER.declareField((parser, request, value) -> { + request.setScriptType(ScriptType.INLINE); + if (parser.currentToken() == XContentParser.Token.START_OBJECT) { + //convert the template to json which is the only supported XContentType (see CustomMustacheFactory#createEncoder) + try (XContentBuilder builder = XContentFactory.jsonBuilder()) { + request.setScript(Strings.toString(builder.copyCurrentStructure(parser))); + } catch (IOException e) { + throw new ParsingException(parser.getTokenLocation(), "Could not parse inline template", e); + } + } else { + request.setScript(parser.text()); + } + }, SOURCE_FIELD, ObjectParser.ValueType.OBJECT_OR_STRING); + } + + public static SearchTemplateRequest fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, new SearchTemplateRequest(), null); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + if (scriptType == ScriptType.STORED) { + builder.field(ID_FIELD.getPreferredName(), script); + } else if (scriptType == ScriptType.INLINE) { + builder.field(SOURCE_FIELD.getPreferredName(), script); + } else { + throw new UnsupportedOperationException("Unrecognized script type [" + scriptType + "]."); + } + + return builder.field(PARAMS_FIELD.getPreferredName(), scriptParams) + .field(EXPLAIN_FIELD.getPreferredName(), explain) + .field(PROFILE_FIELD.getPreferredName(), profile) + .endObject(); + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateResponse.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateResponse.java index 792d993915992..500a5a399ef4a 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateResponse.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateResponse.java @@ -21,18 +21,23 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.StatusToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.rest.RestStatus; import java.io.IOException; import java.io.InputStream; +import java.util.Map; -public class SearchTemplateResponse extends ActionResponse implements StatusToXContentObject { +public class SearchTemplateResponse extends ActionResponse implements StatusToXContentObject { + public static ParseField TEMPLATE_OUTPUT_FIELD = new ParseField("template_output"); /** Contains the source of the rendered template **/ private BytesReference source; @@ -77,6 +82,30 @@ public void readFrom(StreamInput in) throws IOException { response = in.readOptionalStreamable(SearchResponse::new); } + public static SearchTemplateResponse fromXContent(XContentParser parser) throws IOException { + SearchTemplateResponse searchTemplateResponse = new SearchTemplateResponse(); + Map contentAsMap = parser.map(); + + if 
(contentAsMap.containsKey(TEMPLATE_OUTPUT_FIELD.getPreferredName())) { + Object source = contentAsMap.get(TEMPLATE_OUTPUT_FIELD.getPreferredName()); + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON) + .value(source); + searchTemplateResponse.setSource(BytesReference.bytes(builder)); + } else { + XContentType contentType = parser.contentType(); + XContentBuilder builder = XContentFactory.contentBuilder(contentType) + .map(contentAsMap); + XContentParser searchResponseParser = contentType.xContent().createParser( + parser.getXContentRegistry(), + parser.getDeprecationHandler(), + BytesReference.bytes(builder).streamInput()); + + SearchResponse searchResponse = SearchResponse.fromXContent(searchResponseParser); + searchTemplateResponse.setResponse(searchResponse); + } + return searchTemplateResponse; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { if (hasResponse()) { @@ -85,7 +114,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); //we can assume the template is always json as we convert it before compiling it try (InputStream stream = source.streamInput()) { - builder.rawField("template_output", stream, XContentType.JSON); + builder.rawField(TEMPLATE_OUTPUT_FIELD.getPreferredName(), stream, XContentType.JSON); } builder.endObject(); } diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateIT.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateIT.java index 1529b655a5042..fe2fedf62b559 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateIT.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateIT.java @@ -101,7 +101,7 @@ public void testTemplateQueryAsEscapedString() throws Exception { + " \"size\": 1" + " }" + "}"; - SearchTemplateRequest request = RestSearchTemplateAction.parse(createParser(JsonXContent.jsonXContent, query)); + SearchTemplateRequest request = SearchTemplateRequest.fromXContent(createParser(JsonXContent.jsonXContent, query)); request.setRequest(searchRequest); SearchTemplateResponse searchResponse = client().execute(SearchTemplateAction.INSTANCE, request).get(); assertThat(searchResponse.getResponse().getHits().getHits().length, equalTo(1)); @@ -122,7 +122,7 @@ public void testTemplateQueryAsEscapedStringStartingWithConditionalClause() thro + " \"use_size\": true" + " }" + "}"; - SearchTemplateRequest request = RestSearchTemplateAction.parse(createParser(JsonXContent.jsonXContent, templateString)); + SearchTemplateRequest request = SearchTemplateRequest.fromXContent(createParser(JsonXContent.jsonXContent, templateString)); request.setRequest(searchRequest); SearchTemplateResponse searchResponse = client().execute(SearchTemplateAction.INSTANCE, request).get(); assertThat(searchResponse.getResponse().getHits().getHits().length, equalTo(1)); @@ -143,7 +143,7 @@ public void testTemplateQueryAsEscapedStringWithConditionalClauseAtEnd() throws + " \"use_size\": true" + " }" + "}"; - SearchTemplateRequest request = RestSearchTemplateAction.parse(createParser(JsonXContent.jsonXContent, templateString)); + SearchTemplateRequest request = SearchTemplateRequest.fromXContent(createParser(JsonXContent.jsonXContent, templateString)); request.setRequest(searchRequest); SearchTemplateResponse searchResponse = client().execute(SearchTemplateAction.INSTANCE, request).get(); 
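Note: the integration tests in this hunk now parse request bodies through SearchTemplateRequest.fromXContent, the same entry point the REST layer uses. The following is a minimal round-trip sketch of the new fromXContent/toXContent pair; the wrapper class, field names, and parser plumbing are illustrative assumptions, not part of this change (only setScriptType/setScript/setScriptParams, toXContent, and fromXContent come from the diff itself).

import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.script.mustache.SearchTemplateRequest;

import java.util.HashMap;
import java.util.Map;

public class SearchTemplateRequestRoundTrip {
    public static void main(String[] args) throws Exception {
        SearchTemplateRequest original = new SearchTemplateRequest();
        original.setScriptType(ScriptType.INLINE);
        // Inline templates are carried as a JSON string.
        original.setScript("{\"query\": {\"match\": {\"{{my_field}}\": \"{{my_value}}\"}}}");
        Map<String, Object> params = new HashMap<>();
        params.put("my_field", "title");
        params.put("my_value", "elasticsearch");
        original.setScriptParams(params);

        // Serialize the request body exactly as the REST layer would receive it.
        XContentBuilder builder = XContentFactory.jsonBuilder();
        original.toXContent(builder, ToXContent.EMPTY_PARAMS);

        // Parse it back through the new entry point. The 'simulate' flag and the
        // wrapped SearchRequest are deliberately absent from the body, so they do
        // not survive the round trip; only script type, script, params, explain
        // and profile do.
        try (XContentParser parser = XContentType.JSON.xContent().createParser(
                NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
                Strings.toString(builder))) {
            SearchTemplateRequest parsed = SearchTemplateRequest.fromXContent(parser);
        }
    }
}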
assertThat(searchResponse.getResponse().getHits().getHits().length, equalTo(1)); diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateRequestTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateRequestTests.java index 9cdca70f0e1a6..7d4a6479727e2 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateRequestTests.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateRequestTests.java @@ -19,117 +19,77 @@ package org.elasticsearch.script.mustache; -import org.elasticsearch.common.xcontent.XContentParseException; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.script.ScriptType; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.search.RandomSearchRequestGenerator; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.test.AbstractStreamableTestCase; import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; +import java.util.function.Consumer; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasEntry; -import static org.hamcrest.Matchers.hasItems; -import static org.hamcrest.Matchers.hasKey; -import static org.hamcrest.Matchers.nullValue; - -public class SearchTemplateRequestTests extends ESTestCase { - - public void testParseInlineTemplate() throws Exception { - String source = "{" + - " 'source' : {\n" + - " 'query': {\n" + - " 'terms': {\n" + - " 'status': [\n" + - " '{{#status}}',\n" + - " '{{.}}',\n" + - " '{{/status}}'\n" + - " ]\n" + - " }\n" + - " }\n" + - " }" + - "}"; - - SearchTemplateRequest request = RestSearchTemplateAction.parse(newParser(source)); - assertThat(request.getScript(), equalTo("{\"query\":{\"terms\":{\"status\":[\"{{#status}}\",\"{{.}}\",\"{{/status}}\"]}}}")); - assertThat(request.getScriptType(), equalTo(ScriptType.INLINE)); - assertThat(request.getScriptParams(), nullValue()); - } +public class SearchTemplateRequestTests extends AbstractStreamableTestCase { - public void testParseInlineTemplateWithParams() throws Exception { - String source = "{" + - " 'source' : {" + - " 'query': { 'match' : { '{{my_field}}' : '{{my_value}}' } }," + - " 'size' : '{{my_size}}'" + - " }," + - " 'params' : {" + - " 'my_field' : 'foo'," + - " 'my_value' : 'bar'," + - " 'my_size' : 5" + - " }" + - "}"; - - SearchTemplateRequest request = RestSearchTemplateAction.parse(newParser(source)); - assertThat(request.getScript(), equalTo("{\"query\":{\"match\":{\"{{my_field}}\":\"{{my_value}}\"}},\"size\":\"{{my_size}}\"}")); - assertThat(request.getScriptType(), equalTo(ScriptType.INLINE)); - assertThat(request.getScriptParams().size(), equalTo(3)); - assertThat(request.getScriptParams(), hasEntry("my_field", "foo")); - assertThat(request.getScriptParams(), hasEntry("my_value", "bar")); - assertThat(request.getScriptParams(), hasEntry("my_size", 5)); + @Override + protected SearchTemplateRequest createBlankInstance() { + return new SearchTemplateRequest(); } - public void testParseInlineTemplateAsString() throws Exception { - String source = "{'source' : '{\\\"query\\\":{\\\"bool\\\":{\\\"must\\\":{\\\"match\\\":{\\\"foo\\\":\\\"{{text}}\\\"}}}}}'}"; - - SearchTemplateRequest request = RestSearchTemplateAction.parse(newParser(source)); - assertThat(request.getScript(), 
equalTo("{\"query\":{\"bool\":{\"must\":{\"match\":{\"foo\":\"{{text}}\"}}}}}")); - assertThat(request.getScriptType(), equalTo(ScriptType.INLINE)); - assertThat(request.getScriptParams(), nullValue()); + @Override + protected SearchTemplateRequest createTestInstance() { + return createRandomRequest(); } - @SuppressWarnings("unchecked") - public void testParseInlineTemplateAsStringWithParams() throws Exception { - String source = "{'source' : '{\\\"query\\\":{\\\"match\\\":{\\\"{{field}}\\\":\\\"{{value}}\\\"}}}', " + - "'params': {'status': ['pending', 'published']}}"; - - SearchTemplateRequest request = RestSearchTemplateAction.parse(newParser(source)); - assertThat(request.getScript(), equalTo("{\"query\":{\"match\":{\"{{field}}\":\"{{value}}\"}}}")); - assertThat(request.getScriptType(), equalTo(ScriptType.INLINE)); - assertThat(request.getScriptParams().size(), equalTo(1)); - assertThat(request.getScriptParams(), hasKey("status")); - assertThat((List) request.getScriptParams().get("status"), hasItems("pending", "published")); + @Override + protected SearchTemplateRequest mutateInstance(SearchTemplateRequest instance) throws IOException { + List> mutators = new ArrayList<>(); + + mutators.add(request -> request.setScriptType( + randomValueOtherThan(request.getScriptType(), () -> randomFrom(ScriptType.values())))); + mutators.add(request -> request.setScript( + randomValueOtherThan(request.getScript(), () -> randomAlphaOfLength(50)))); + + mutators.add(request -> { + Map mutatedScriptParams = new HashMap<>(request.getScriptParams()); + String newField = randomValueOtherThanMany(mutatedScriptParams::containsKey, () -> randomAlphaOfLength(5)); + mutatedScriptParams.put(newField, randomAlphaOfLength(10)); + request.setScriptParams(mutatedScriptParams); + }); + + mutators.add(request -> request.setProfile(!request.isProfile())); + mutators.add(request -> request.setExplain(!request.isExplain())); + mutators.add(request -> request.setSimulate(!request.isSimulate())); + + mutators.add(request -> request.setRequest( + RandomSearchRequestGenerator.randomSearchRequest(SearchSourceBuilder::searchSource))); + + SearchTemplateRequest mutatedInstance = copyInstance(instance); + Consumer mutator = randomFrom(mutators); + mutator.accept(mutatedInstance); + return mutatedInstance; } - public void testParseStoredTemplate() throws Exception { - String source = "{'id' : 'storedTemplate'}"; - - SearchTemplateRequest request = RestSearchTemplateAction.parse(newParser(source)); - assertThat(request.getScript(), equalTo("storedTemplate")); - assertThat(request.getScriptType(), equalTo(ScriptType.STORED)); - assertThat(request.getScriptParams(), nullValue()); - } - public void testParseStoredTemplateWithParams() throws Exception { - String source = "{'id' : 'another_template', 'params' : {'bar': 'foo'}}"; + public static SearchTemplateRequest createRandomRequest() { + SearchTemplateRequest request = new SearchTemplateRequest(); + request.setScriptType(randomFrom(ScriptType.values())); + request.setScript(randomAlphaOfLength(50)); - SearchTemplateRequest request = RestSearchTemplateAction.parse(newParser(source)); - assertThat(request.getScript(), equalTo("another_template")); - assertThat(request.getScriptType(), equalTo(ScriptType.STORED)); - assertThat(request.getScriptParams().size(), equalTo(1)); - assertThat(request.getScriptParams(), hasEntry("bar", "foo")); - } + Map scriptParams = new HashMap<>(); + for (int i = 0; i < randomInt(10); i++) { + scriptParams.put(randomAlphaOfLength(5), 
randomAlphaOfLength(10)); + } + request.setScriptParams(scriptParams); - public void testParseWrongTemplate() { - // Unclosed template id - expectThrows(XContentParseException.class, () -> RestSearchTemplateAction.parse(newParser("{'id' : 'another_temp }"))); - } + request.setExplain(randomBoolean()); + request.setProfile(randomBoolean()); + request.setSimulate(randomBoolean()); - /** - * Creates a {@link XContentParser} with the given String while replacing single quote to double quotes. - */ - private XContentParser newParser(String s) throws IOException { - assertNotNull(s); - return createParser(JsonXContent.jsonXContent, s.replace("'", "\"")); + request.setRequest(RandomSearchRequestGenerator.randomSearchRequest( + SearchSourceBuilder::searchSource)); + return request; } } diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateRequestXContentTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateRequestXContentTests.java new file mode 100644 index 0000000000000..0e9e8ca628975 --- /dev/null +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateRequestXContentTests.java @@ -0,0 +1,197 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.script.mustache; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParseException; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.nullValue; + +public class SearchTemplateRequestXContentTests extends AbstractXContentTestCase { + + @Override + public SearchTemplateRequest createTestInstance() { + return SearchTemplateRequestTests.createRandomRequest(); + } + + @Override + protected SearchTemplateRequest doParseInstance(XContentParser parser) throws IOException { + return SearchTemplateRequest.fromXContent(parser); + } + + /** + * Note that when checking equality for xContent parsing, we omit two parts of the request: + * - The 'simulate' option, since this parameter is not included in the + * request's xContent (it's instead used to determine the request endpoint). + * - The random SearchRequest, since this component only affects the request + * parameters and also isn't captured in the request's xContent. + */ + @Override + protected void assertEqualInstances(SearchTemplateRequest expectedInstance, SearchTemplateRequest newInstance) { + assertTrue( + expectedInstance.isExplain() == newInstance.isExplain() && + expectedInstance.isProfile() == newInstance.isProfile() && + expectedInstance.getScriptType() == newInstance.getScriptType() && + Objects.equals(expectedInstance.getScript(), newInstance.getScript()) && + Objects.equals(expectedInstance.getScriptParams(), newInstance.getScriptParams())); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + public void testToXContentWithInlineTemplate() throws IOException { + SearchTemplateRequest request = new SearchTemplateRequest(); + + request.setScriptType(ScriptType.INLINE); + request.setScript("{\"query\": { \"match\" : { \"{{my_field}}\" : \"{{my_value}}\" } } }"); + request.setProfile(true); + + Map scriptParams = new HashMap<>(); + scriptParams.put("my_field", "foo"); + scriptParams.put("my_value", "bar"); + request.setScriptParams(scriptParams); + + XContentType contentType = randomFrom(XContentType.values()); + XContentBuilder expectedRequest = XContentFactory.contentBuilder(contentType) + .startObject() + .field("source", "{\"query\": { \"match\" : { \"{{my_field}}\" : \"{{my_value}}\" } } }") + .startObject("params") + .field("my_field", "foo") + .field("my_value", "bar") + .endObject() + .field("explain", false) + .field("profile", true) + .endObject(); + + XContentBuilder actualRequest = XContentFactory.contentBuilder(contentType); + request.toXContent(actualRequest, ToXContent.EMPTY_PARAMS); + + assertToXContentEquivalent(BytesReference.bytes(expectedRequest), + BytesReference.bytes(actualRequest), + contentType); + } + + public void testToXContentWithStoredTemplate() throws 
IOException { + SearchTemplateRequest request = new SearchTemplateRequest(); + + request.setScriptType(ScriptType.STORED); + request.setScript("match_template"); + request.setExplain(true); + + Map params = new HashMap<>(); + params.put("my_field", "foo"); + params.put("my_value", "bar"); + request.setScriptParams(params); + + XContentType contentType = randomFrom(XContentType.values()); + XContentBuilder expectedRequest = XContentFactory.contentBuilder(contentType) + .startObject() + .field("id", "match_template") + .startObject("params") + .field("my_field", "foo") + .field("my_value", "bar") + .endObject() + .field("explain", true) + .field("profile", false) + .endObject(); + + XContentBuilder actualRequest = XContentFactory.contentBuilder(contentType); + request.toXContent(actualRequest, ToXContent.EMPTY_PARAMS); + + assertToXContentEquivalent( + BytesReference.bytes(expectedRequest), + BytesReference.bytes(actualRequest), + contentType); + } + + public void testFromXContentWithEmbeddedTemplate() throws Exception { + String source = "{" + + " 'source' : {\n" + + " 'query': {\n" + + " 'terms': {\n" + + " 'status': [\n" + + " '{{#status}}',\n" + + " '{{.}}',\n" + + " '{{/status}}'\n" + + " ]\n" + + " }\n" + + " }\n" + + " }" + + "}"; + + SearchTemplateRequest request = SearchTemplateRequest.fromXContent(newParser(source)); + assertThat(request.getScript(), equalTo("{\"query\":{\"terms\":{\"status\":[\"{{#status}}\",\"{{.}}\",\"{{/status}}\"]}}}")); + assertThat(request.getScriptType(), equalTo(ScriptType.INLINE)); + assertThat(request.getScriptParams(), nullValue()); + } + + public void testFromXContentWithEmbeddedTemplateAndParams() throws Exception { + String source = "{" + + " 'source' : {" + + " 'query': { 'match' : { '{{my_field}}' : '{{my_value}}' } }," + + " 'size' : '{{my_size}}'" + + " }," + + " 'params' : {" + + " 'my_field' : 'foo'," + + " 'my_value' : 'bar'," + + " 'my_size' : 5" + + " }" + + "}"; + + SearchTemplateRequest request = SearchTemplateRequest.fromXContent(newParser(source)); + assertThat(request.getScript(), equalTo("{\"query\":{\"match\":{\"{{my_field}}\":\"{{my_value}}\"}},\"size\":\"{{my_size}}\"}")); + assertThat(request.getScriptType(), equalTo(ScriptType.INLINE)); + assertThat(request.getScriptParams().size(), equalTo(3)); + assertThat(request.getScriptParams(), hasEntry("my_field", "foo")); + assertThat(request.getScriptParams(), hasEntry("my_value", "bar")); + assertThat(request.getScriptParams(), hasEntry("my_size", 5)); + } + + public void testFromXContentWithMalformedRequest() { + // Unclosed template id + expectThrows(XContentParseException.class, () -> SearchTemplateRequest.fromXContent(newParser("{'id' : 'another_temp }"))); + } + + /** + * Creates a {@link XContentParser} with the given String while replacing single quotes with double quotes. + */ + private XContentParser newParser(String s) throws IOException { + assertNotNull(s); + return createParser(JsonXContent.jsonXContent, s.replace("'", "\"")); + } +} diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateResponseTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateResponseTests.java new file mode 100644 index 0000000000000..53f5d1d8f842e --- /dev/null +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateResponseTests.java @@ -0,0 +1,211 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements.
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.script.mustache; + +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.text.Text; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.Collections; +import java.util.function.Predicate; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; + +public class SearchTemplateResponseTests extends AbstractXContentTestCase { + + @Override + protected SearchTemplateResponse createTestInstance() { + SearchTemplateResponse response = new SearchTemplateResponse(); + if (randomBoolean()) { + response.setResponse(createSearchResponse()); + } else { + response.setSource(createSource()); + } + return response; + } + + @Override + protected SearchTemplateResponse doParseInstance(XContentParser parser) throws IOException { + return SearchTemplateResponse.fromXContent(parser); + } + + /** + * For simplicity we create a minimal response, as there is already a dedicated + * test class for search response parsing and serialization. 
+ */ + private static SearchResponse createSearchResponse() { + long tookInMillis = randomNonNegativeLong(); + int totalShards = randomIntBetween(1, Integer.MAX_VALUE); + int successfulShards = randomIntBetween(0, totalShards); + int skippedShards = randomIntBetween(0, totalShards); + InternalSearchResponse internalSearchResponse = InternalSearchResponse.empty(); + + return new SearchResponse(internalSearchResponse, null, totalShards, successfulShards, + skippedShards, tookInMillis, ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY); + } + + private static BytesReference createSource() { + try { + XContentBuilder source = XContentFactory.jsonBuilder() + .startObject() + .startObject("query") + .startObject("match") + .field(randomAlphaOfLength(5), randomAlphaOfLength(10)) + .endObject() + .endObject() + .endObject(); + return BytesReference.bytes(source); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + String templateOutputField = SearchTemplateResponse.TEMPLATE_OUTPUT_FIELD.getPreferredName(); + return field -> field.equals(templateOutputField) || field.startsWith(templateOutputField + "."); + } + + /** + * Note that we can't rely on normal equals and hashCode checks, since {@link SearchResponse} doesn't + * currently implement equals and hashCode. Instead, we compare the template outputs for equality, + * and perform some sanity checks on the search response instances. + */ + @Override + protected void assertEqualInstances(SearchTemplateResponse expectedInstance, SearchTemplateResponse newInstance) { + assertNotSame(newInstance, expectedInstance); + + BytesReference expectedSource = expectedInstance.getSource(); + BytesReference newSource = newInstance.getSource(); + assertEquals(expectedSource == null, newSource == null); + if (expectedSource != null) { + try { + assertToXContentEquivalent(expectedSource, newSource, XContentType.JSON); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + assertEquals(expectedInstance.hasResponse(), newInstance.hasResponse()); + if (expectedInstance.hasResponse()) { + SearchResponse expectedResponse = expectedInstance.getResponse(); + SearchResponse newResponse = newInstance.getResponse(); + + assertEquals(expectedResponse.getHits().totalHits, newResponse.getHits().totalHits); + assertEquals(expectedResponse.getHits().getMaxScore(), newResponse.getHits().getMaxScore(), 0.0001); + } + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + public void testSourceToXContent() throws IOException { + SearchTemplateResponse response = new SearchTemplateResponse(); + + XContentBuilder source = XContentFactory.jsonBuilder() + .startObject() + .startObject("query") + .startObject("terms") + .field("status", new String[]{"pending", "published"}) + .endObject() + .endObject() + .endObject(); + response.setSource(BytesReference.bytes(source)); + + XContentType contentType = randomFrom(XContentType.values()); + XContentBuilder expectedResponse = XContentFactory.contentBuilder(contentType) + .startObject() + .startObject("template_output") + .startObject("query") + .startObject("terms") + .field("status", new String[]{"pending", "published"}) + .endObject() + .endObject() + .endObject() + .endObject(); + + XContentBuilder actualResponse = XContentFactory.contentBuilder(contentType); + response.toXContent(actualResponse, ToXContent.EMPTY_PARAMS); + + assertToXContentEquivalent( + BytesReference.bytes(expectedResponse), 
+ BytesReference.bytes(actualResponse), + contentType); + } + + public void testSearchResponseToXContent() throws IOException { + SearchHit hit = new SearchHit(1, "id", new Text("type"), Collections.emptyMap()); + hit.score(2.0f); + SearchHit[] hits = new SearchHit[] { hit }; + + InternalSearchResponse internalSearchResponse = new InternalSearchResponse( + new SearchHits(hits, 100, 1.5f), null, null, null, false, null, 1); + SearchResponse searchResponse = new SearchResponse(internalSearchResponse, null, + 0, 0, 0, 0, ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY); + + SearchTemplateResponse response = new SearchTemplateResponse(); + response.setResponse(searchResponse); + + XContentType contentType = randomFrom(XContentType.values()); + XContentBuilder expectedResponse = XContentFactory.contentBuilder(contentType) + .startObject() + .field("took", 0) + .field("timed_out", false) + .startObject("_shards") + .field("total", 0) + .field("successful", 0) + .field("skipped", 0) + .field("failed", 0) + .endObject() + .startObject("hits") + .field("total", 100) + .field("max_score", 1.5F) + .startArray("hits") + .startObject() + .field("_type", "type") + .field("_id", "id") + .field("_score", 2.0F) + .endObject() + .endArray() + .endObject() + .endObject(); + + XContentBuilder actualResponse = XContentFactory.contentBuilder(contentType); + response.toXContent(actualResponse, ToXContent.EMPTY_PARAMS); + + assertToXContentEquivalent( + BytesReference.bytes(expectedResponse), + BytesReference.bytes(actualResponse), + contentType); + } +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java index 0364ad667efc7..4ebcf8bfb82d2 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java @@ -32,6 +32,7 @@ import org.elasticsearch.painless.spi.PainlessExtension; import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.painless.spi.WhitelistLoader; import org.elasticsearch.plugins.ExtensiblePlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.ScriptPlugin; @@ -39,6 +40,7 @@ import org.elasticsearch.rest.RestHandler; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptEngine; +import org.elasticsearch.search.aggregations.pipeline.movfn.MovingFunctionScript; import java.util.ArrayList; import java.util.Arrays; @@ -55,18 +57,34 @@ */ public final class PainlessPlugin extends Plugin implements ScriptPlugin, ExtensiblePlugin, ActionPlugin { - private final Map, List> extendedWhitelists = new HashMap<>(); + private static final Map, List> whitelists; + + /* + * Contexts from Core that need custom whitelists can add them to the map below. 
+ * Whitelist resources should be added as appropriately named, separate files + * under Painless' resources + */ + static { + Map, List> map = new HashMap<>(); + + // Moving Function Pipeline Agg + List movFn = new ArrayList<>(Whitelist.BASE_WHITELISTS); + movFn.add(WhitelistLoader.loadFromResourceFiles(Whitelist.class, "org.elasticsearch.aggs.movfn.txt")); + map.put(MovingFunctionScript.CONTEXT, movFn); + + whitelists = map; + } @Override public ScriptEngine getScriptEngine(Settings settings, Collection> contexts) { Map, List> contextsWithWhitelists = new HashMap<>(); for (ScriptContext context : contexts) { // we might have a context that only uses the base whitelists, so would not have been filled in by reloadSPI - List whitelists = extendedWhitelists.get(context); - if (whitelists == null) { - whitelists = new ArrayList<>(Whitelist.BASE_WHITELISTS); + List contextWhitelists = whitelists.get(context); + if (contextWhitelists == null) { + contextWhitelists = new ArrayList<>(Whitelist.BASE_WHITELISTS); } - contextsWithWhitelists.put(context, whitelists); + contextsWithWhitelists.put(context, contextWhitelists); } return new PainlessScriptEngine(settings, contextsWithWhitelists); } @@ -80,7 +98,7 @@ public List> getSettings() { public void reloadSPI(ClassLoader loader) { for (PainlessExtension extension : ServiceLoader.load(PainlessExtension.class, loader)) { for (Map.Entry, List> entry : extension.getContextWhitelists().entrySet()) { - List existing = extendedWhitelists.computeIfAbsent(entry.getKey(), + List existing = whitelists.computeIfAbsent(entry.getKey(), c -> new ArrayList<>(Whitelist.BASE_WHITELISTS)); existing.addAll(entry.getValue()); } diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.aggs.movfn.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.aggs.movfn.txt new file mode 100644 index 0000000000000..a120b73820ada --- /dev/null +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.aggs.movfn.txt @@ -0,0 +1,32 @@ +# +# Licensed to Elasticsearch under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +# This file contains a whitelist for the Moving Function pipeline aggregator in core + +class org.elasticsearch.search.aggregations.pipeline.movfn.MovingFunctions { + double max(double[]) + double min(double[]) + double sum(double[]) + double stdDev(double[], double) + double unweightedAvg(double[]) + double linearWeightedAvg(double[]) + double ewma(double[], double) + double holt(double[], double, double) + double holtWinters(double[], double, double, double, int, boolean) +} \ No newline at end of file diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/70_mov_fn_agg.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/70_mov_fn_agg.yml new file mode 100644 index 0000000000000..039b54aab01d1 --- /dev/null +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/70_mov_fn_agg.yml @@ -0,0 +1,315 @@ +# Sanity integration test to make sure the custom context and whitelist work for moving_fn pipeline agg +# +setup: + - skip: + version: " - 6.4.0" + reason: "moving_fn added in 6.4.0" + - do: + indices.create: + index: test + body: + mappings: + _doc: + properties: + value_field: + type: integer + date: + type: date + + - do: + bulk: + refresh: true + body: + - index: + _index: test + _type: _doc + _id: 1 + - date: "2017-01-01T00:00:00" + value_field: 1 + - index: + _index: test + _type: _doc + _id: 2 + - date: "2017-01-02T00:00:00" + value_field: 2 + - index: + _index: test + _type: _doc + _id: 3 + - date: "2017-01-03T00:00:00" + value_field: 3 + - index: + _index: test + _type: _doc + _id: 4 + - date: "2017-01-04T00:00:00" + value_field: 4 + - index: + _index: test + _type: _doc + _id: 5 + - date: "2017-01-05T00:00:00" + value_field: 5 + - index: + _index: test + _type: _doc + _id: 6 + - date: "2017-01-06T00:00:00" + value_field: 6 + + - do: + indices.refresh: + index: [test] + +--- +"max": + + - do: + search: + body: + size: 0 + aggs: + the_histo: + date_histogram: + field: "date" + interval: "1d" + aggs: + the_avg: + avg: + field: "value_field" + the_mov_fn: + moving_fn: + buckets_path: "the_avg" + window: 3 + script: "MovingFunctions.max(values)" + + - match: { hits.total: 6 } + - length: { hits.hits: 0 } + - is_false: aggregations.the_histo.buckets.0.the_mov_fn.value + - match: { aggregations.the_histo.buckets.1.the_mov_fn.value: 1.0 } + - match: { aggregations.the_histo.buckets.2.the_mov_fn.value: 2.0 } + - match: { aggregations.the_histo.buckets.3.the_mov_fn.value: 3.0 } + - match: { aggregations.the_histo.buckets.4.the_mov_fn.value: 4.0 } + - match: { aggregations.the_histo.buckets.5.the_mov_fn.value: 5.0 } + +--- +"min": + + - do: + search: + body: + size: 0 + aggs: + the_histo: + date_histogram: + field: "date" + interval: "1d" + aggs: + the_avg: + avg: + field: "value_field" + the_mov_fn: + moving_fn: + buckets_path: "the_avg" + window: 3 + script: "MovingFunctions.min(values)" + + - match: { hits.total: 6 } + - length: { hits.hits: 0 } + - is_false: aggregations.the_histo.buckets.0.the_mov_fn.value + - match: { aggregations.the_histo.buckets.1.the_mov_fn.value: 1.0 } + - match: { aggregations.the_histo.buckets.2.the_mov_fn.value: 1.0 } + - match: { aggregations.the_histo.buckets.3.the_mov_fn.value: 1.0 } + - match: { aggregations.the_histo.buckets.4.the_mov_fn.value: 2.0 } + - match: { aggregations.the_histo.buckets.5.the_mov_fn.value: 3.0 } + +--- +"sum": + + - do: + search: + body: + size: 0 + aggs: + the_histo: + date_histogram: + field: "date" + interval: "1d" + aggs: + the_avg: + avg: + field: 
"value_field" + the_mov_fn: + moving_fn: + buckets_path: "the_avg" + window: 3 + script: "MovingFunctions.sum(values)" + + - match: { hits.total: 6 } + - length: { hits.hits: 0 } + - match: { aggregations.the_histo.buckets.0.the_mov_fn.value: 0.0 } + - match: { aggregations.the_histo.buckets.1.the_mov_fn.value: 1.0 } + - match: { aggregations.the_histo.buckets.2.the_mov_fn.value: 3.0 } + - match: { aggregations.the_histo.buckets.3.the_mov_fn.value: 6.0 } + - match: { aggregations.the_histo.buckets.4.the_mov_fn.value: 9.0 } + - match: { aggregations.the_histo.buckets.5.the_mov_fn.value: 12.0 } + +--- +"unweightedAvg": + + - do: + search: + body: + size: 0 + aggs: + the_histo: + date_histogram: + field: "date" + interval: "1d" + aggs: + the_avg: + avg: + field: "value_field" + the_mov_fn: + moving_fn: + buckets_path: "the_avg" + window: 3 + script: "MovingFunctions.unweightedAvg(values)" + + - match: { hits.total: 6 } + - length: { hits.hits: 0 } + + +--- +"linearWeightedAvg": + + - do: + search: + body: + size: 0 + aggs: + the_histo: + date_histogram: + field: "date" + interval: "1d" + aggs: + the_avg: + avg: + field: "value_field" + the_mov_fn: + moving_fn: + buckets_path: "the_avg" + window: 3 + script: "MovingFunctions.linearWeightedAvg(values)" + + - match: { hits.total: 6 } + - length: { hits.hits: 0 } + + +--- +"ewma": + + - do: + search: + body: + size: 0 + aggs: + the_histo: + date_histogram: + field: "date" + interval: "1d" + aggs: + the_avg: + avg: + field: "value_field" + the_mov_fn: + moving_fn: + buckets_path: "the_avg" + window: 3 + script: "MovingFunctions.ewma(values, 0.1)" + + - match: { hits.total: 6 } + - length: { hits.hits: 0 } + + +--- +"holt": + + - do: + search: + body: + size: 0 + aggs: + the_histo: + date_histogram: + field: "date" + interval: "1d" + aggs: + the_avg: + avg: + field: "value_field" + the_mov_fn: + moving_fn: + buckets_path: "the_avg" + window: 3 + script: "MovingFunctions.holt(values, 0.1, 0.1)" + + - match: { hits.total: 6 } + - length: { hits.hits: 0 } + + +--- +"holtWinters": + + - do: + search: + body: + size: 0 + aggs: + the_histo: + date_histogram: + field: "date" + interval: "1d" + aggs: + the_avg: + avg: + field: "value_field" + the_mov_fn: + moving_fn: + buckets_path: "the_avg" + window: 1 + script: "if (values.length > 1) { MovingFunctions.holtWinters(values, 0.1, 0.1, 0.1, 1, true)}" + + - match: { hits.total: 6 } + - length: { hits.hits: 0 } + +--- +"stdDev": + + - do: + search: + body: + size: 0 + aggs: + the_histo: + date_histogram: + field: "date" + interval: "1d" + aggs: + the_avg: + avg: + field: "value_field" + the_mov_fn: + moving_fn: + buckets_path: "the_avg" + window: 3 + script: "MovingFunctions.stdDev(values, MovingFunctions.unweightedAvg(values))" + + - match: { hits.total: 6 } + - length: { hits.hits: 0 } + + + + + + diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java index 5babcef2e8d65..103679f5328ef 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java @@ -161,7 +161,7 @@ static BinaryFieldMapper createQueryBuilderFieldBuilder(BuilderContext context) } static RangeFieldMapper createExtractedRangeFieldBuilder(String name, RangeType rangeType, BuilderContext context) { - RangeFieldMapper.Builder builder = new RangeFieldMapper.Builder(name, 
rangeType, context.indexCreatedVersion()); + RangeFieldMapper.Builder builder = new RangeFieldMapper.Builder(name, rangeType); // For now no doc values, because in processQuery(...) only the Lucene range fields get added: builder.docValues(false); return builder.build(context); diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java index 12db47908d1f3..6e39a7f50d2cd 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java @@ -40,6 +40,7 @@ import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.http.HttpHandlingSettings; import org.elasticsearch.http.netty4.cors.Netty4CorsHandler; import org.elasticsearch.http.netty4.pipelining.HttpPipelinedRequest; import org.elasticsearch.rest.AbstractRestChannel; @@ -60,27 +61,29 @@ final class Netty4HttpChannel extends AbstractRestChannel { private final FullHttpRequest nettyRequest; private final HttpPipelinedRequest pipelinedRequest; private final ThreadContext threadContext; + private final HttpHandlingSettings handlingSettings; /** * @param transport The corresponding NettyHttpServerTransport where this channel belongs to. * @param request The request that is handled by this channel. * @param pipelinedRequest If HTTP pipelining is enabled provide the corresponding pipelined request. May be null if - * HTTP pipelining is disabled. - * @param detailedErrorsEnabled true iff error messages should include stack traces. + * HTTP pipelining is disabled. + * @param handlingSettings the HTTP handling settings for this channel, including whether detailed errors (stack traces) are enabled.
* @param threadContext the thread context for the channel */ Netty4HttpChannel( final Netty4HttpServerTransport transport, final Netty4HttpRequest request, final HttpPipelinedRequest pipelinedRequest, - final boolean detailedErrorsEnabled, + final HttpHandlingSettings handlingSettings, final ThreadContext threadContext) { - super(request, detailedErrorsEnabled); + super(request, handlingSettings.getDetailedErrorsEnabled()); this.transport = transport; this.channel = request.getChannel(); this.nettyRequest = request.request(); this.pipelinedRequest = pipelinedRequest; this.threadContext = threadContext; + this.handlingSettings = handlingSettings; } @Override @@ -170,7 +173,7 @@ private void setHeaderField(HttpResponse resp, String headerField, String value, } private void addCookies(HttpResponse resp) { - if (transport.resetCookies) { + if (handlingSettings.isResetCookies()) { String cookieString = nettyRequest.headers().get(HttpHeaderNames.COOKIE); if (cookieString != null) { Set cookies = ServerCookieDecoder.STRICT.decode(cookieString); @@ -222,8 +225,6 @@ private FullHttpResponse newResponse(ByteBuf buffer) { return response; } - private static final HttpResponseStatus TOO_MANY_REQUESTS = new HttpResponseStatus(429, "Too Many Requests"); - private static Map MAP; static { @@ -266,7 +267,7 @@ private FullHttpResponse newResponse(ByteBuf buffer) { map.put(RestStatus.UNPROCESSABLE_ENTITY, HttpResponseStatus.BAD_REQUEST); map.put(RestStatus.LOCKED, HttpResponseStatus.BAD_REQUEST); map.put(RestStatus.FAILED_DEPENDENCY, HttpResponseStatus.BAD_REQUEST); - map.put(RestStatus.TOO_MANY_REQUESTS, TOO_MANY_REQUESTS); + map.put(RestStatus.TOO_MANY_REQUESTS, HttpResponseStatus.TOO_MANY_REQUESTS); map.put(RestStatus.INTERNAL_SERVER_ERROR, HttpResponseStatus.INTERNAL_SERVER_ERROR); map.put(RestStatus.NOT_IMPLEMENTED, HttpResponseStatus.NOT_IMPLEMENTED); map.put(RestStatus.BAD_GATEWAY, HttpResponseStatus.BAD_GATEWAY); @@ -279,5 +280,4 @@ private FullHttpResponse newResponse(ByteBuf buffer) { private static HttpResponseStatus getStatus(RestStatus status) { return MAP.getOrDefault(status, HttpResponseStatus.INTERNAL_SERVER_ERROR); } - } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java index 1fd18b2a016d7..74429c8dda9b7 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java @@ -29,6 +29,7 @@ import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.HttpHeaders; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.http.HttpHandlingSettings; import org.elasticsearch.http.netty4.pipelining.HttpPipelinedRequest; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.transport.netty4.Netty4Utils; @@ -39,14 +40,15 @@ class Netty4HttpRequestHandler extends SimpleChannelInboundHandler { private final Netty4HttpServerTransport serverTransport; + private final HttpHandlingSettings handlingSettings; private final boolean httpPipeliningEnabled; - private final boolean detailedErrorsEnabled; private final ThreadContext threadContext; - Netty4HttpRequestHandler(Netty4HttpServerTransport serverTransport, boolean detailedErrorsEnabled, ThreadContext threadContext) { + Netty4HttpRequestHandler(Netty4HttpServerTransport 
serverTransport, HttpHandlingSettings handlingSettings, + ThreadContext threadContext) { this.serverTransport = serverTransport; this.httpPipeliningEnabled = serverTransport.pipelining; - this.detailedErrorsEnabled = detailedErrorsEnabled; + this.handlingSettings = handlingSettings; this.threadContext = threadContext; } @@ -109,7 +111,7 @@ protected void channelRead0(ChannelHandlerContext ctx, Object msg) throws Except Netty4HttpChannel innerChannel; try { innerChannel = - new Netty4HttpChannel(serverTransport, httpRequest, pipelinedRequest, detailedErrorsEnabled, threadContext); + new Netty4HttpChannel(serverTransport, httpRequest, pipelinedRequest, handlingSettings, threadContext); } catch (final IllegalArgumentException e) { if (badRequestCause == null) { badRequestCause = e; @@ -124,7 +126,7 @@ protected void channelRead0(ChannelHandlerContext ctx, Object msg) throws Except copy, ctx.channel()); innerChannel = - new Netty4HttpChannel(serverTransport, innerRequest, pipelinedRequest, detailedErrorsEnabled, threadContext); + new Netty4HttpChannel(serverTransport, innerRequest, pipelinedRequest, handlingSettings, threadContext); } channel = innerChannel; } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java index c8c2c4829d2cf..8e5bace46aa7e 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java @@ -19,8 +19,6 @@ package org.elasticsearch.http.netty4; -import com.carrotsearch.hppc.IntHashSet; -import com.carrotsearch.hppc.IntSet; import io.netty.bootstrap.ServerBootstrap; import io.netty.channel.Channel; import io.netty.channel.ChannelFuture; @@ -44,15 +42,12 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.NetworkExceptionHelper; -import org.elasticsearch.common.transport.PortsRange; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -62,18 +57,14 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.http.BindHttpException; -import org.elasticsearch.http.HttpInfo; -import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.http.HttpHandlingSettings; import org.elasticsearch.http.HttpStats; import org.elasticsearch.http.netty4.cors.Netty4CorsConfig; import org.elasticsearch.http.netty4.cors.Netty4CorsConfigBuilder; import org.elasticsearch.http.netty4.cors.Netty4CorsHandler; import org.elasticsearch.http.netty4.pipelining.HttpPipeliningHandler; -import org.elasticsearch.rest.RestChannel; -import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestUtils; import 
org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.BindTransportException; import org.elasticsearch.transport.netty4.Netty4OpenChannelsHandler; import org.elasticsearch.transport.netty4.Netty4Utils; @@ -94,7 +85,6 @@ import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN; import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_MAX_AGE; -import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_BIND_HOST; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED; @@ -102,9 +92,6 @@ import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH; -import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_PORT; -import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_PUBLISH_HOST; -import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_READ_TIMEOUT; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_RESET_COOKIES; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_KEEP_ALIVE; @@ -116,7 +103,7 @@ import static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS; import static org.elasticsearch.http.netty4.cors.Netty4CorsHandler.ANY_ORIGIN; -public class Netty4HttpServerTransport extends AbstractLifecycleComponent implements HttpServerTransport { +public class Netty4HttpServerTransport extends AbstractHttpServerTransport { static { Netty4Utils.setup(); @@ -167,11 +154,8 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem public static final Setting SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE = Setting.byteSizeSetting("http.netty.receive_predictor_size", new ByteSizeValue(64, ByteSizeUnit.KB), Property.NodeScope); - - protected final NetworkService networkService; protected final BigArrays bigArrays; - protected final ByteSizeValue maxContentLength; protected final ByteSizeValue maxInitialLineLength; protected final ByteSizeValue maxHeaderSize; protected final ByteSizeValue maxChunkSize; @@ -182,20 +166,6 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem protected final int pipeliningMaxEvents; - protected final boolean compression; - - protected final int compressionLevel; - - protected final boolean resetCookies; - - protected final PortsRange port; - - protected final String bindHosts[]; - - protected final String publishHosts[]; - - protected final boolean detailedErrorsEnabled; - protected final ThreadPool threadPool; /** * The registry used to construct parsers so they support {@link XContentParser#namedObject(Class, String, Object)}. 
*/ @@ -211,14 +181,13 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem private final int readTimeoutMillis; protected final int maxCompositeBufferComponents; - private final Dispatcher dispatcher; protected volatile ServerBootstrap serverBootstrap; - protected volatile BoundTransportAddress boundAddress; - protected final List serverChannels = new ArrayList<>(); + protected final HttpHandlingSettings httpHandlingSettings; + // package private for testing Netty4OpenChannelsHandler serverOpenChannels; @@ -227,49 +196,40 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem public Netty4HttpServerTransport(Settings settings, NetworkService networkService, BigArrays bigArrays, ThreadPool threadPool, NamedXContentRegistry xContentRegistry, Dispatcher dispatcher) { - super(settings); + super(settings, networkService, threadPool, dispatcher); Netty4Utils.setAvailableProcessors(EsExecutors.PROCESSORS_SETTING.get(settings)); - this.networkService = networkService; this.bigArrays = bigArrays; - this.threadPool = threadPool; this.xContentRegistry = xContentRegistry; - this.dispatcher = dispatcher; - ByteSizeValue maxContentLength = SETTING_HTTP_MAX_CONTENT_LENGTH.get(settings); this.maxChunkSize = SETTING_HTTP_MAX_CHUNK_SIZE.get(settings); this.maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings); this.maxInitialLineLength = SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.get(settings); - this.resetCookies = SETTING_HTTP_RESET_COOKIES.get(settings); + this.httpHandlingSettings = new HttpHandlingSettings(Math.toIntExact(maxContentLength.getBytes()), + Math.toIntExact(maxChunkSize.getBytes()), + Math.toIntExact(maxHeaderSize.getBytes()), + Math.toIntExact(maxInitialLineLength.getBytes()), + SETTING_HTTP_RESET_COOKIES.get(settings), + SETTING_HTTP_COMPRESSION.get(settings), + SETTING_HTTP_COMPRESSION_LEVEL.get(settings), + SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings)); + this.maxCompositeBufferComponents = SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS.get(settings); this.workerCount = SETTING_HTTP_WORKER_COUNT.get(settings); - this.port = SETTING_HTTP_PORT.get(settings); - // we can't make the network.bind_host a fallback since we already fall back to http.host hence the extra conditional here - List httpBindHost = SETTING_HTTP_BIND_HOST.get(settings); - this.bindHosts = (httpBindHost.isEmpty() ? NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING.get(settings) : httpBindHost) - .toArray(Strings.EMPTY_ARRAY); - // we can't make the network.publish_host a fallback since we already fall back to http.host hence the extra conditional here - List httpPublishHost = SETTING_HTTP_PUBLISH_HOST.get(settings); - this.publishHosts = (httpPublishHost.isEmpty() ? 
NetworkService.GLOBAL_NETWORK_PUBLISHHOST_SETTING.get(settings) : httpPublishHost) - .toArray(Strings.EMPTY_ARRAY); + this.tcpNoDelay = SETTING_HTTP_TCP_NO_DELAY.get(settings); this.tcpKeepAlive = SETTING_HTTP_TCP_KEEP_ALIVE.get(settings); this.reuseAddress = SETTING_HTTP_TCP_REUSE_ADDRESS.get(settings); this.tcpSendBufferSize = SETTING_HTTP_TCP_SEND_BUFFER_SIZE.get(settings); this.tcpReceiveBufferSize = SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE.get(settings); - this.detailedErrorsEnabled = SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings); this.readTimeoutMillis = Math.toIntExact(SETTING_HTTP_READ_TIMEOUT.get(settings).getMillis()); ByteSizeValue receivePredictor = SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE.get(settings); recvByteBufAllocator = new FixedRecvByteBufAllocator(receivePredictor.bytesAsInt()); - this.compression = SETTING_HTTP_COMPRESSION.get(settings); - this.compressionLevel = SETTING_HTTP_COMPRESSION_LEVEL.get(settings); this.pipelining = SETTING_PIPELINING.get(settings); this.pipeliningMaxEvents = SETTING_PIPELINING_MAX_EVENTS.get(settings); this.corsConfig = buildCorsConfig(settings); - this.maxContentLength = maxContentLength; - logger.debug("using max_chunk_size[{}], max_header_size[{}], max_initial_line_length[{}], max_content_length[{}], " + "receive_predictor[{}], max_composite_buffer_components[{}], pipelining[{}], pipelining_max_events[{}]", maxChunkSize, maxHeaderSize, maxInitialLineLength, this.maxContentLength, receivePredictor, maxCompositeBufferComponents, @@ -326,65 +286,6 @@ protected void doStart() { } } - private BoundTransportAddress createBoundHttpAddress() { - // Bind and start to accept incoming connections. - InetAddress hostAddresses[]; - try { - hostAddresses = networkService.resolveBindHostAddresses(bindHosts); - } catch (IOException e) { - throw new BindHttpException("Failed to resolve host [" + Arrays.toString(bindHosts) + "]", e); - } - - List boundAddresses = new ArrayList<>(hostAddresses.length); - for (InetAddress address : hostAddresses) { - boundAddresses.add(bindAddress(address)); - } - - final InetAddress publishInetAddress; - try { - publishInetAddress = networkService.resolvePublishHostAddresses(publishHosts); - } catch (Exception e) { - throw new BindTransportException("Failed to resolve publish address", e); - } - - final int publishPort = resolvePublishPort(settings, boundAddresses, publishInetAddress); - final InetSocketAddress publishAddress = new InetSocketAddress(publishInetAddress, publishPort); - return new BoundTransportAddress(boundAddresses.toArray(new TransportAddress[0]), new TransportAddress(publishAddress)); - } - - // package private for tests - static int resolvePublishPort(Settings settings, List boundAddresses, InetAddress publishInetAddress) { - int publishPort = SETTING_HTTP_PUBLISH_PORT.get(settings); - - if (publishPort < 0) { - for (TransportAddress boundAddress : boundAddresses) { - InetAddress boundInetAddress = boundAddress.address().getAddress(); - if (boundInetAddress.isAnyLocalAddress() || boundInetAddress.equals(publishInetAddress)) { - publishPort = boundAddress.getPort(); - break; - } - } - } - - // if no matching boundAddress found, check if there is a unique port for all bound addresses - if (publishPort < 0) { - final IntSet ports = new IntHashSet(); - for (TransportAddress boundAddress : boundAddresses) { - ports.add(boundAddress.getPort()); - } - if (ports.size() == 1) { - publishPort = ports.iterator().next().value; - } - } - - if (publishPort < 0) { - throw new BindHttpException("Failed to 
auto-resolve http publish port, multiple bound addresses " + boundAddresses + - " with distinct ports and none of them matched the publish address (" + publishInetAddress + "). " + - "Please specify a unique port by setting " + SETTING_HTTP_PORT.getKey() + " or " + SETTING_HTTP_PUBLISH_PORT.getKey()); - } - return publishPort; - } - // package private for testing static Netty4CorsConfig buildCorsConfig(Settings settings) { if (SETTING_CORS_ENABLED.get(settings) == false) { @@ -419,7 +320,8 @@ static Netty4CorsConfig buildCorsConfig(Settings settings) { .build(); } - private TransportAddress bindAddress(final InetAddress hostAddress) { + @Override + protected TransportAddress bindAddress(final InetAddress hostAddress) { final AtomicReference lastException = new AtomicReference<>(); final AtomicReference boundSocket = new AtomicReference<>(); boolean success = port.iterate(portNumber -> { @@ -473,20 +375,6 @@ protected void doStop() { protected void doClose() { } - @Override - public BoundTransportAddress boundAddress() { - return this.boundAddress; - } - - @Override - public HttpInfo info() { - BoundTransportAddress boundTransportAddress = boundAddress(); - if (boundTransportAddress == null) { - return null; - } - return new HttpInfo(boundTransportAddress, maxContentLength.getBytes()); - } - @Override public HttpStats stats() { Netty4OpenChannelsHandler channels = serverOpenChannels; @@ -497,20 +385,6 @@ public Netty4CorsConfig getCorsConfig() { return corsConfig; } - void dispatchRequest(final RestRequest request, final RestChannel channel) { - final ThreadContext threadContext = threadPool.getThreadContext(); - try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - dispatcher.dispatchRequest(request, channel, threadContext); - } - } - - void dispatchBadRequest(final RestRequest request, final RestChannel channel, final Throwable cause) { - final ThreadContext threadContext = threadPool.getThreadContext(); - try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - dispatcher.dispatchBadRequest(request, channel, threadContext, cause); - } - } - protected void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { if (cause instanceof ReadTimeoutException) { if (logger.isTraceEnabled()) { @@ -539,20 +413,22 @@ protected void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throw } public ChannelHandler configureServerChannelHandler() { - return new HttpChannelHandler(this, detailedErrorsEnabled, threadPool.getThreadContext()); + return new HttpChannelHandler(this, httpHandlingSettings, threadPool.getThreadContext()); } protected static class HttpChannelHandler extends ChannelInitializer { private final Netty4HttpServerTransport transport; private final Netty4HttpRequestHandler requestHandler; + private final HttpHandlingSettings handlingSettings; protected HttpChannelHandler( final Netty4HttpServerTransport transport, - final boolean detailedErrorsEnabled, + final HttpHandlingSettings handlingSettings, final ThreadContext threadContext) { this.transport = transport; - this.requestHandler = new Netty4HttpRequestHandler(transport, detailedErrorsEnabled, threadContext); + this.handlingSettings = handlingSettings; + this.requestHandler = new Netty4HttpRequestHandler(transport, handlingSettings, threadContext); } @Override @@ -560,18 +436,18 @@ protected void initChannel(Channel ch) throws Exception { ch.pipeline().addLast("openChannels", transport.serverOpenChannels); ch.pipeline().addLast("read_timeout", new 
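Editor's note on the deletions above: createBoundHttpAddress, resolvePublishPort, boundAddress(), info(), and the dispatchRequest/dispatchBadRequest helpers do not disappear; the class now extends AbstractHttpServerTransport, so they have presumably been hoisted into that shared base class. The base class is not shown in this diff; the following skeleton is inferred from the super(settings, networkService, threadPool, dispatcher) call and the bindAddress override, and should be read as a sketch only:

import java.net.InetAddress;

import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.PortsRange;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.threadpool.ThreadPool;

import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_PORT;

// Hypothetical skeleton of AbstractHttpServerTransport, reconstructed from the members
// this diff removes from Netty4HttpServerTransport.
public abstract class AbstractHttpServerTransport extends AbstractLifecycleComponent implements HttpServerTransport {

    protected final NetworkService networkService;
    protected final ThreadPool threadPool;
    protected final PortsRange port;
    protected volatile BoundTransportAddress boundAddress;
    private final Dispatcher dispatcher;

    protected AbstractHttpServerTransport(Settings settings, NetworkService networkService,
                                          ThreadPool threadPool, Dispatcher dispatcher) {
        super(settings);
        this.networkService = networkService;
        this.threadPool = threadPool;
        this.dispatcher = dispatcher;
        this.port = SETTING_HTTP_PORT.get(settings);
    }

    // Subclasses bind a single host address; the base class presumably drives the
    // host/port iteration, publish-port resolution, and info() reporting deleted above.
    protected abstract TransportAddress bindAddress(InetAddress hostAddress);

    @Override
    public BoundTransportAddress boundAddress() {
        return boundAddress;
    }

    // Stashes the thread context before dispatching, exactly as the deleted
    // Netty4 helper methods did.
    void dispatchRequest(final RestRequest request, final RestChannel channel) {
        final ThreadContext threadContext = threadPool.getThreadContext();
        try (ThreadContext.StoredContext ignore = threadContext.stashContext()) {
            dispatcher.dispatchRequest(request, channel, threadContext);
        }
    }
}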
ReadTimeoutHandler(transport.readTimeoutMillis, TimeUnit.MILLISECONDS)); final HttpRequestDecoder decoder = new HttpRequestDecoder( - Math.toIntExact(transport.maxInitialLineLength.getBytes()), - Math.toIntExact(transport.maxHeaderSize.getBytes()), - Math.toIntExact(transport.maxChunkSize.getBytes())); + handlingSettings.getMaxInitialLineLength(), + handlingSettings.getMaxHeaderSize(), + handlingSettings.getMaxChunkSize()); decoder.setCumulator(ByteToMessageDecoder.COMPOSITE_CUMULATOR); ch.pipeline().addLast("decoder", decoder); ch.pipeline().addLast("decoder_compress", new HttpContentDecompressor()); ch.pipeline().addLast("encoder", new HttpResponseEncoder()); - final HttpObjectAggregator aggregator = new HttpObjectAggregator(Math.toIntExact(transport.maxContentLength.getBytes())); + final HttpObjectAggregator aggregator = new HttpObjectAggregator(handlingSettings.getMaxContentLength()); aggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents); ch.pipeline().addLast("aggregator", aggregator); - if (transport.compression) { - ch.pipeline().addLast("encoder_compress", new HttpContentCompressor(transport.compressionLevel)); + if (handlingSettings.isCompression()) { + ch.pipeline().addLast("encoder_compress", new HttpContentCompressor(handlingSettings.getCompressionLevel())); } if (SETTING_CORS_ENABLED.get(transport.settings())) { ch.pipeline().addLast("cors", new Netty4CorsHandler(transport.getCorsConfig())); @@ -587,7 +463,6 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws E Netty4Utils.maybeDie(cause); super.exceptionCaught(ctx, cause); } - } } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java index 918e98fd2e7c0..0ef1ea585b11c 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java @@ -56,6 +56,7 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.http.HttpHandlingSettings; import org.elasticsearch.http.HttpTransportSettings; import org.elasticsearch.http.NullDispatcher; import org.elasticsearch.http.netty4.cors.Netty4CorsHandler; @@ -212,10 +213,11 @@ public void testHeadersSet() { httpRequest.headers().add(HttpHeaderNames.ORIGIN, "remote"); final WriteCapturingChannel writeCapturingChannel = new WriteCapturingChannel(); Netty4HttpRequest request = new Netty4HttpRequest(xContentRegistry(), httpRequest, writeCapturingChannel); + HttpHandlingSettings handlingSettings = httpServerTransport.httpHandlingSettings; // send a response Netty4HttpChannel channel = - new Netty4HttpChannel(httpServerTransport, request, null, randomBoolean(), threadPool.getThreadContext()); + new Netty4HttpChannel(httpServerTransport, request, null, handlingSettings, threadPool.getThreadContext()); TestResponse resp = new TestResponse(); final String customHeader = "custom-header"; final String customHeaderValue = "xyz"; @@ -242,8 +244,9 @@ public void testReleaseOnSendToClosedChannel() { final EmbeddedChannel embeddedChannel = new EmbeddedChannel(); final Netty4HttpRequest request = new Netty4HttpRequest(registry, httpRequest, embeddedChannel); final HttpPipelinedRequest pipelinedRequest = 
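A small detail in the pipeline wiring above: netty's HttpRequestDecoder and HttpObjectAggregator take int sizes, while the Elasticsearch settings are long-valued ByteSizeValues. The narrowing now happens exactly once, in the HttpHandlingSettings constructor call, via Math.toIntExact. For example:

import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;

class ToIntExactDemo {
    public static void main(String[] args) {
        ByteSizeValue maxContent = new ByteSizeValue(100, ByteSizeUnit.MB);
        // prints 104857600; Math.toIntExact throws ArithmeticException rather than
        // silently truncating if a setting ever exceeded Integer.MAX_VALUE bytes
        System.out.println(Math.toIntExact(maxContent.getBytes()));
    }
}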
randomBoolean() ? new HttpPipelinedRequest(request.request(), 1) : null; + HttpHandlingSettings handlingSettings = httpServerTransport.httpHandlingSettings; final Netty4HttpChannel channel = - new Netty4HttpChannel(httpServerTransport, request, pipelinedRequest, randomBoolean(), threadPool.getThreadContext()); + new Netty4HttpChannel(httpServerTransport, request, pipelinedRequest, handlingSettings, threadPool.getThreadContext()); final TestResponse response = new TestResponse(bigArrays); assertThat(response.content(), instanceOf(Releasable.class)); embeddedChannel.close(); @@ -261,8 +264,9 @@ public void testReleaseOnSendToChannelAfterException() throws IOException { final EmbeddedChannel embeddedChannel = new EmbeddedChannel(); final Netty4HttpRequest request = new Netty4HttpRequest(registry, httpRequest, embeddedChannel); final HttpPipelinedRequest pipelinedRequest = randomBoolean() ? new HttpPipelinedRequest(request.request(), 1) : null; + HttpHandlingSettings handlingSettings = httpServerTransport.httpHandlingSettings; final Netty4HttpChannel channel = - new Netty4HttpChannel(httpServerTransport, request, pipelinedRequest, randomBoolean(), threadPool.getThreadContext()); + new Netty4HttpChannel(httpServerTransport, request, pipelinedRequest, handlingSettings, threadPool.getThreadContext()); final BytesRestResponse response = new BytesRestResponse(RestStatus.INTERNAL_SERVER_ERROR, JsonXContent.contentBuilder().startObject().endObject()); assertThat(response.content(), not(instanceOf(Releasable.class))); @@ -306,8 +310,9 @@ public void testConnectionClose() throws Exception { // send a response, the channel close status should match assertTrue(embeddedChannel.isOpen()); + HttpHandlingSettings handlingSettings = httpServerTransport.httpHandlingSettings; final Netty4HttpChannel channel = - new Netty4HttpChannel(httpServerTransport, request, null, randomBoolean(), threadPool.getThreadContext()); + new Netty4HttpChannel(httpServerTransport, request, null, handlingSettings, threadPool.getThreadContext()); final TestResponse resp = new TestResponse(); channel.sendResponse(resp); assertThat(embeddedChannel.isOpen(), equalTo(!close)); @@ -332,9 +337,10 @@ private FullHttpResponse executeRequest(final Settings settings, final String or final WriteCapturingChannel writeCapturingChannel = new WriteCapturingChannel(); final Netty4HttpRequest request = new Netty4HttpRequest(xContentRegistry(), httpRequest, writeCapturingChannel); + HttpHandlingSettings handlingSettings = httpServerTransport.httpHandlingSettings; Netty4HttpChannel channel = - new Netty4HttpChannel(httpServerTransport, request, null, randomBoolean(), threadPool.getThreadContext()); + new Netty4HttpChannel(httpServerTransport, request, null, handlingSettings, threadPool.getThreadContext()); channel.sendResponse(new TestResponse()); // get the response diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java index 91a5465f6a764..0eb14a8a76e9b 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java @@ -184,7 +184,7 @@ private class CustomHttpChannelHandler extends Netty4HttpServerTransport.HttpCha private final ExecutorService executorService; CustomHttpChannelHandler(Netty4HttpServerTransport 
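Editor's note: the tests now exercise the channel with the transport's actual httpHandlingSettings rather than a random detailed-errors flag. A test that needed a standalone instance could presumably build one directly, since the constructor takes plain values. A hypothetical helper, with the argument order mirroring the constructor call in Netty4HttpServerTransport above and values matching the usual HTTP defaults:

import org.elasticsearch.http.HttpHandlingSettings;

class HandlingSettingsFixture {
    // Illustrative only; argument order is (maxContentLength, maxChunkSize,
    // maxHeaderSize, maxInitialLineLength, resetCookies, compression,
    // compressionLevel, detailedErrorsEnabled).
    static HttpHandlingSettings defaults() {
        return new HttpHandlingSettings(
            100 * 1024 * 1024, // http.max_content_length: 100mb
            8 * 1024,          // http.max_chunk_size: 8kb
            8 * 1024,          // http.max_header_size: 8kb
            4 * 1024,          // http.max_initial_line_length: 4kb
            false,             // http.reset_cookies
            true,              // http.compression
            3,                 // http.compression_level
            true);             // http.detailed_errors.enabled
    }
}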
transport, ExecutorService executorService, ThreadContext threadContext) { - super(transport, randomBoolean(), threadContext); + super(transport, transport.httpHandlingSettings, threadContext); this.executorService = executorService; } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4BadRequestIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4BadRequestIT.java index 028770ed22469..bc89558d3c6dc 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4BadRequestIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4BadRequestIT.java @@ -99,5 +99,4 @@ public void testInvalidHeaderValue() throws IOException { assertThat(map.get("type"), equalTo("content_type_header_exception")); assertThat(map.get("reason"), equalTo("java.lang.IllegalArgumentException: invalid Content-Type header []")); } - } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java index 010c4b92c21a0..e31495efc0eef 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java @@ -90,6 +90,8 @@ public List> getSettings() { S3ClientSettings.PROXY_PASSWORD_SETTING, S3ClientSettings.READ_TIMEOUT_SETTING, S3ClientSettings.MAX_RETRIES_SETTING, - S3ClientSettings.USE_THROTTLE_RETRIES_SETTING); + S3ClientSettings.USE_THROTTLE_RETRIES_SETTING, + S3Repository.ACCESS_KEY_SETTING, + S3Repository.SECRET_KEY_SETTING); } } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index e3e89c41514de..e599f84b411e4 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -21,7 +21,10 @@ import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.model.CannedAccessControlList; import com.amazonaws.services.s3.model.StorageClass; + +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -29,6 +32,12 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase; +import org.elasticsearch.rest.AbstractRestChannel; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.action.admin.cluster.RestGetRepositoriesAction; +import org.elasticsearch.test.rest.FakeRestRequest; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -38,9 +47,14 @@ import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicReference; import static java.util.Collections.emptyMap; import static 
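Editor's note on the S3 change above: registering the repository-level access_key/secret_key settings through the plugin is what makes them known to the settings infrastructure, so SettingsFilter can redact them from API responses (the test below asserts exactly that). A filtered setting declaration looks roughly like this; these are hypothetical stand-ins for the real S3Repository constants, which may carry additional properties:

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;

final class FilteredSettingsSketch {
    // Property.Filtered is the essential piece: it keeps the value out of
    // GET _snapshot and node-settings output.
    static final Setting<String> ACCESS_KEY_SETTING =
            Setting.simpleString("access_key", Property.Filtered);
    static final Setting<String> SECRET_KEY_SETTING =
            Setting.simpleString("secret_key", Property.Filtered);
}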
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.not; +import static org.mockito.Mockito.mock; public class S3BlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCase { @@ -81,7 +95,9 @@ protected void createTestRepository(final String name) { .put(S3Repository.BUFFER_SIZE_SETTING.getKey(), bufferSize) .put(S3Repository.SERVER_SIDE_ENCRYPTION_SETTING.getKey(), serverSideEncryption) .put(S3Repository.CANNED_ACL_SETTING.getKey(), cannedACL) - .put(S3Repository.STORAGE_CLASS_SETTING.getKey(), storageClass))); + .put(S3Repository.STORAGE_CLASS_SETTING.getKey(), storageClass) + .put(S3Repository.ACCESS_KEY_SETTING.getKey(), "not_used_but_this_is_a_secret") + .put(S3Repository.SECRET_KEY_SETTING.getKey(), "not_used_but_this_is_a_secret"))); } @Override @@ -106,4 +122,32 @@ public synchronized AmazonS3 client(final Settings repositorySettings) { })); } } + + public void testInsecureRepositoryCredentials() throws Exception { + final String repositoryName = "testInsecureRepositoryCredentials"; + createTestRepository(repositoryName); + final NodeClient nodeClient = internalCluster().getInstance(NodeClient.class); + final RestGetRepositoriesAction getRepoAction = new RestGetRepositoriesAction(Settings.EMPTY, mock(RestController.class), + internalCluster().getInstance(SettingsFilter.class)); + final RestRequest getRepoRequest = new FakeRestRequest(); + getRepoRequest.params().put("repository", repositoryName); + final CountDownLatch getRepoLatch = new CountDownLatch(1); + final AtomicReference getRepoError = new AtomicReference<>(); + getRepoAction.handleRequest(getRepoRequest, new AbstractRestChannel(getRepoRequest, true) { + @Override + public void sendResponse(RestResponse response) { + try { + assertThat(response.content().utf8ToString(), not(containsString("not_used_but_this_is_a_secret"))); + } catch (final AssertionError ex) { + getRepoError.set(ex); + } + getRepoLatch.countDown(); + } + }, nodeClient); + getRepoLatch.await(); + if (getRepoError.get() != null) { + throw getRepoError.get(); + } + } + } diff --git a/plugins/transport-nio/build.gradle b/plugins/transport-nio/build.gradle index 60fef4b34241d..e278ebf47983e 100644 --- a/plugins/transport-nio/build.gradle +++ b/plugins/transport-nio/build.gradle @@ -29,4 +29,115 @@ compileTestJava.options.compilerArgs << "-Xlint:-rawtypes,-unchecked" dependencies { compile "org.elasticsearch:elasticsearch-nio:${version}" -} \ No newline at end of file + + // network stack + compile "io.netty:netty-buffer:4.1.16.Final" + compile "io.netty:netty-codec:4.1.16.Final" + compile "io.netty:netty-codec-http:4.1.16.Final" + compile "io.netty:netty-common:4.1.16.Final" + compile "io.netty:netty-handler:4.1.16.Final" + compile "io.netty:netty-resolver:4.1.16.Final" + compile "io.netty:netty-transport:4.1.16.Final" +} + +thirdPartyAudit.excludes = [ + // classes are missing + + // from io.netty.handler.codec.protobuf.ProtobufDecoder (netty) + 'com.google.protobuf.ExtensionRegistry', + 'com.google.protobuf.MessageLite$Builder', + 'com.google.protobuf.MessageLite', + 'com.google.protobuf.Parser', + + // from io.netty.logging.CommonsLoggerFactory (netty) + 'org.apache.commons.logging.Log', + 'org.apache.commons.logging.LogFactory', + + // from io.netty.handler.ssl.OpenSslEngine (netty) + 'io.netty.internal.tcnative.Buffer', + 'io.netty.internal.tcnative.Library', + 'io.netty.internal.tcnative.SSL', + 'io.netty.internal.tcnative.SSLContext', + + 
// from io.netty.handler.ssl.util.BouncyCastleSelfSignedCertGenerator (netty) + 'org.bouncycastle.asn1.x500.X500Name', + 'org.bouncycastle.cert.X509v3CertificateBuilder', + 'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter', + 'org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder', + 'org.bouncycastle.jce.provider.BouncyCastleProvider', + 'org.bouncycastle.operator.jcajce.JcaContentSignerBuilder', + + // from io.netty.handler.ssl.JettyNpnSslEngine (netty) + 'org.eclipse.jetty.npn.NextProtoNego$ClientProvider', + 'org.eclipse.jetty.npn.NextProtoNego$ServerProvider', + 'org.eclipse.jetty.npn.NextProtoNego', + + // from io.netty.handler.codec.marshalling.ChannelBufferByteInput (netty) + 'org.jboss.marshalling.ByteInput', + + // from io.netty.handler.codec.marshalling.ChannelBufferByteOutput (netty) + 'org.jboss.marshalling.ByteOutput', + + // from io.netty.handler.codec.marshalling.CompatibleMarshallingEncoder (netty) + 'org.jboss.marshalling.Marshaller', + + // from io.netty.handler.codec.marshalling.ContextBoundUnmarshallerProvider (netty) + 'org.jboss.marshalling.MarshallerFactory', + 'org.jboss.marshalling.MarshallingConfiguration', + 'org.jboss.marshalling.Unmarshaller', + + // from io.netty.util.internal.logging.InternalLoggerFactory (netty) - it's optional + 'org.slf4j.Logger', + 'org.slf4j.LoggerFactory', + + 'com.google.protobuf.ExtensionRegistryLite', + 'com.google.protobuf.MessageLiteOrBuilder', + 'com.google.protobuf.nano.CodedOutputByteBufferNano', + 'com.google.protobuf.nano.MessageNano', + 'com.jcraft.jzlib.Deflater', + 'com.jcraft.jzlib.Inflater', + 'com.jcraft.jzlib.JZlib$WrapperType', + 'com.jcraft.jzlib.JZlib', + 'com.ning.compress.BufferRecycler', + 'com.ning.compress.lzf.ChunkDecoder', + 'com.ning.compress.lzf.ChunkEncoder', + 'com.ning.compress.lzf.LZFEncoder', + 'com.ning.compress.lzf.util.ChunkDecoderFactory', + 'com.ning.compress.lzf.util.ChunkEncoderFactory', + 'lzma.sdk.lzma.Encoder', + 'net.jpountz.lz4.LZ4Compressor', + 'net.jpountz.lz4.LZ4Factory', + 'net.jpountz.lz4.LZ4FastDecompressor', + 'net.jpountz.xxhash.StreamingXXHash32', + 'net.jpountz.xxhash.XXHashFactory', + 'io.netty.internal.tcnative.CertificateRequestedCallback', + 'io.netty.internal.tcnative.CertificateRequestedCallback$KeyMaterial', + 'io.netty.internal.tcnative.CertificateVerifier', + 'io.netty.internal.tcnative.SessionTicketKey', + 'io.netty.internal.tcnative.SniHostNameMatcher', + 'org.eclipse.jetty.alpn.ALPN$ClientProvider', + 'org.eclipse.jetty.alpn.ALPN$ServerProvider', + 'org.eclipse.jetty.alpn.ALPN', + + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator', + 'io.netty.util.internal.PlatformDependent0', + 'io.netty.util.internal.PlatformDependent0$1', + 'io.netty.util.internal.PlatformDependent0$2', + 'io.netty.util.internal.PlatformDependent0$3', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueConsumerNodeRef', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueProducerNodeRef', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueColdProducerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueConsumerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueProducerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.LinkedQueueNode', + 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField', + 
'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess', + + 'org.conscrypt.AllocatedBuffer', + 'org.conscrypt.BufferAllocator', + 'org.conscrypt.Conscrypt$Engines', + 'org.conscrypt.HandshakeListener' +] \ No newline at end of file diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/ByteBufUtils.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/ByteBufUtils.java new file mode 100644 index 0000000000000..b4108b3e6c7d0 --- /dev/null +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/ByteBufUtils.java @@ -0,0 +1,252 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.http.nio; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.CompositeByteBuf; +import io.netty.buffer.Unpooled; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefIterator; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.EOFException; +import java.io.IOException; +import java.io.OutputStream; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; + +class ByteBufUtils { + + /** + * Turns the given BytesReference into a ByteBuf. Note: the returned ByteBuf will reference the internal + * pages of the BytesReference. Don't free the bytes of reference before the ByteBuf goes out of scope. 
+     */
+    static ByteBuf toByteBuf(final BytesReference reference) {
+        if (reference.length() == 0) {
+            return Unpooled.EMPTY_BUFFER;
+        }
+        if (reference instanceof ByteBufBytesReference) {
+            return ((ByteBufBytesReference) reference).toByteBuf();
+        } else {
+            final BytesRefIterator iterator = reference.iterator();
+            // usually we have one, two, or three components from the header, the message, and a buffer
+            final List<ByteBuf> buffers = new ArrayList<>(3);
+            try {
+                BytesRef slice;
+                while ((slice = iterator.next()) != null) {
+                    buffers.add(Unpooled.wrappedBuffer(slice.bytes, slice.offset, slice.length));
+                }
+                final CompositeByteBuf composite = Unpooled.compositeBuffer(buffers.size());
+                composite.addComponents(true, buffers);
+                return composite;
+            } catch (IOException ex) {
+                throw new AssertionError("no IO happens here", ex);
+            }
+        }
+    }
+
+    static BytesReference toBytesReference(final ByteBuf buffer) {
+        return new ByteBufBytesReference(buffer, buffer.readableBytes());
+    }
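Editor's note: a quick usage sketch of the two conversions before the inner classes. This assumes same-package access, since ByteBufUtils is package-private; the key point is that toByteBuf wraps the reference's pages without copying, so the reference must stay alive while the ByteBuf is in use.

import java.nio.charset.StandardCharsets;

import io.netty.buffer.ByteBuf;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;

class ByteBufUtilsDemo {
    static void roundTrip() {
        // BytesArray is the simplest concrete BytesReference
        BytesReference ref = new BytesArray("hello".getBytes(StandardCharsets.UTF_8));
        ByteBuf buf = ByteBufUtils.toByteBuf(ref);      // zero-copy wrap
        assert buf.readableBytes() == 5;
        BytesReference back = ByteBufUtils.toBytesReference(buf);
        assert "hello".equals(back.utf8ToString());
    }
}

+
+    private static class ByteBufBytesReference extends BytesReference {
+
+        private final ByteBuf buffer;
+        private final int length;
+        private final int offset;
+
+        ByteBufBytesReference(ByteBuf buffer, int length) {
+            this.buffer = buffer;
+            this.length = length;
+            this.offset = buffer.readerIndex();
+            assert length <= buffer.readableBytes() : "length[" + length + "] > " + buffer.readableBytes();
+        }
+
+        @Override
+        public byte get(int index) {
+            return buffer.getByte(offset + index);
+        }
+
+        @Override
+        public int length() {
+            return length;
+        }
+
+        @Override
+        public BytesReference slice(int from, int length) {
+            return new ByteBufBytesReference(buffer.slice(offset + from, length), length);
+        }
+
+        @Override
+        public StreamInput streamInput() {
+            return new ByteBufStreamInput(buffer.duplicate(), length);
+        }
+
+        @Override
+        public void writeTo(OutputStream os) throws IOException {
+            buffer.getBytes(offset, os, length);
+        }
+
+        ByteBuf toByteBuf() {
+            return buffer.duplicate();
+        }
+
+        @Override
+        public String utf8ToString() {
+            return buffer.toString(offset, length, StandardCharsets.UTF_8);
+        }
+
+        @Override
+        public BytesRef toBytesRef() {
+            if (buffer.hasArray()) {
+                return new BytesRef(buffer.array(), buffer.arrayOffset() + offset, length);
+            }
+            final byte[] copy = new byte[length];
+            buffer.getBytes(offset, copy);
+            return new BytesRef(copy);
+        }
+
+        @Override
+        public long ramBytesUsed() {
+            return buffer.capacity();
+        }
+
+    }
+
+    private static class ByteBufStreamInput extends StreamInput {
+
+        private final ByteBuf buffer;
+        private final int endIndex;
+
+        ByteBufStreamInput(ByteBuf buffer, int length) {
+            if (length > buffer.readableBytes()) {
+                throw new IndexOutOfBoundsException();
+            }
+            this.buffer = buffer;
+            int startIndex = buffer.readerIndex();
+            endIndex = startIndex + length;
+            buffer.markReaderIndex();
+        }
+
+        @Override
+        public BytesReference readBytesReference(int length) throws IOException {
+            // NOTE: It is unsafe to share a reference of the internal structure, so we
+            // use the default implementation which will copy the bytes.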
It is unsafe because + // a netty ByteBuf might be pooled which requires a manual release to prevent + // memory leaks. + return super.readBytesRef(length); + } + + @Override + public int available() throws IOException { + return endIndex - buffer.readerIndex(); + } + + @Override + protected void ensureCanReadBytes(int length) throws EOFException { + int bytesAvailable = endIndex - buffer.readerIndex(); + if (bytesAvailable < length) { + throw new EOFException("tried to read: " + length + " bytes but only " + bytesAvailable + " remaining"); + } + } + + @Override + public void mark(int readlimit) { + buffer.markReaderIndex(); + } + + @Override + public boolean markSupported() { + return true; + } + + @Override + public int read() throws IOException { + if (available() == 0) { + return -1; + } + return buffer.readByte() & 0xff; + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + if (len == 0) { + return 0; + } + int available = available(); + if (available == 0) { + return -1; + } + + len = Math.min(available, len); + buffer.readBytes(b, off, len); + return len; + } + + @Override + public void reset() throws IOException { + buffer.resetReaderIndex(); + } + + @Override + public long skip(long n) throws IOException { + if (n > Integer.MAX_VALUE) { + return skipBytes(Integer.MAX_VALUE); + } else { + return skipBytes((int) n); + } + } + + public int skipBytes(int n) throws IOException { + int nBytes = Math.min(available(), n); + buffer.skipBytes(nBytes); + return nBytes; + } + + + @Override + public byte readByte() throws IOException { + return buffer.readByte(); + } + + @Override + public void readBytes(byte[] b, int offset, int len) throws IOException { + int read = read(b, offset, len); + if (read < len) { + throw new IndexOutOfBoundsException(); + } + } + + @Override + public void close() throws IOException { + // nothing to do here + } + } +} diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java new file mode 100644 index 0000000000000..f1d18ddacbd13 --- /dev/null +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java @@ -0,0 +1,225 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.http.nio; + +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelHandler; +import io.netty.handler.codec.ByteToMessageDecoder; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.DefaultHttpHeaders; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpContentCompressor; +import io.netty.handler.codec.http.HttpContentDecompressor; +import io.netty.handler.codec.http.HttpHeaders; +import io.netty.handler.codec.http.HttpObjectAggregator; +import io.netty.handler.codec.http.HttpRequestDecoder; +import io.netty.handler.codec.http.HttpResponseEncoder; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.http.HttpHandlingSettings; +import org.elasticsearch.nio.FlushOperation; +import org.elasticsearch.nio.InboundChannelBuffer; +import org.elasticsearch.nio.ReadWriteHandler; +import org.elasticsearch.nio.NioSocketChannel; +import org.elasticsearch.nio.SocketChannelContext; +import org.elasticsearch.nio.WriteOperation; +import org.elasticsearch.rest.RestRequest; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.function.BiConsumer; + +public class HttpReadWriteHandler implements ReadWriteHandler { + + private final NettyAdaptor adaptor; + private final NioSocketChannel nioChannel; + private final NioHttpServerTransport transport; + private final HttpHandlingSettings settings; + private final NamedXContentRegistry xContentRegistry; + private final ThreadContext threadContext; + + HttpReadWriteHandler(NioSocketChannel nioChannel, NioHttpServerTransport transport, HttpHandlingSettings settings, + NamedXContentRegistry xContentRegistry, ThreadContext threadContext) { + this.nioChannel = nioChannel; + this.transport = transport; + this.settings = settings; + this.xContentRegistry = xContentRegistry; + this.threadContext = threadContext; + + List handlers = new ArrayList<>(5); + HttpRequestDecoder decoder = new HttpRequestDecoder(settings.getMaxInitialLineLength(), settings.getMaxHeaderSize(), + settings.getMaxChunkSize()); + decoder.setCumulator(ByteToMessageDecoder.COMPOSITE_CUMULATOR); + handlers.add(decoder); + handlers.add(new HttpContentDecompressor()); + handlers.add(new HttpResponseEncoder()); + handlers.add(new HttpObjectAggregator(settings.getMaxContentLength())); + if (settings.isCompression()) { + handlers.add(new HttpContentCompressor(settings.getCompressionLevel())); + } + + adaptor = new NettyAdaptor(handlers.toArray(new ChannelHandler[0])); + adaptor.addCloseListener((v, e) -> nioChannel.close()); + } + + @Override + public int consumeReads(InboundChannelBuffer channelBuffer) throws IOException { + int bytesConsumed = adaptor.read(channelBuffer.sliceBuffersTo(channelBuffer.getIndex())); + Object message; + while ((message = adaptor.pollInboundMessage()) != null) { + handleRequest(message); + } + + return bytesConsumed; + } + + @Override + public WriteOperation createWriteOperation(SocketChannelContext context, Object message, BiConsumer listener) { + assert message instanceof FullHttpResponse : "This channel only supports messages that are of type: " + FullHttpResponse.class + + ". 
Found type: " + message.getClass() + "."; + return new HttpWriteOperation(context, (FullHttpResponse) message, listener); + } + + @Override + public List writeToBytes(WriteOperation writeOperation) { + adaptor.write(writeOperation); + return pollFlushOperations(); + } + + @Override + public List pollFlushOperations() { + ArrayList copiedOperations = new ArrayList<>(adaptor.getOutboundCount()); + FlushOperation flushOperation; + while ((flushOperation = adaptor.pollOutboundOperation()) != null) { + copiedOperations.add(flushOperation); + } + return copiedOperations; + } + + @Override + public void close() throws IOException { + try { + adaptor.close(); + } catch (Exception e) { + throw new IOException(e); + } + } + + private void handleRequest(Object msg) { + final FullHttpRequest request = (FullHttpRequest) msg; + + final FullHttpRequest copiedRequest = + new DefaultFullHttpRequest( + request.protocolVersion(), + request.method(), + request.uri(), + Unpooled.copiedBuffer(request.content()), + request.headers(), + request.trailingHeaders()); + + Exception badRequestCause = null; + + /* + * We want to create a REST request from the incoming request from Netty. However, creating this request could fail if there + * are incorrectly encoded parameters, or the Content-Type header is invalid. If one of these specific failures occurs, we + * attempt to create a REST request again without the input that caused the exception (e.g., we remove the Content-Type header, + * or skip decoding the parameters). Once we have a request in hand, we then dispatch the request as a bad request with the + * underlying exception that caused us to treat the request as bad. + */ + final NioHttpRequest httpRequest; + { + NioHttpRequest innerHttpRequest; + try { + innerHttpRequest = new NioHttpRequest(xContentRegistry, copiedRequest); + } catch (final RestRequest.ContentTypeHeaderException e) { + badRequestCause = e; + innerHttpRequest = requestWithoutContentTypeHeader(copiedRequest, badRequestCause); + } catch (final RestRequest.BadParameterException e) { + badRequestCause = e; + innerHttpRequest = requestWithoutParameters(copiedRequest); + } + httpRequest = innerHttpRequest; + } + + /* + * We now want to create a channel used to send the response on. However, creating this channel can fail if there are invalid + * parameter values for any of the filter_path, human, or pretty parameters. We detect these specific failures via an + * IllegalArgumentException from the channel constructor and then attempt to create a new channel that bypasses parsing of these + * parameter values. 
+ */ + final NioHttpChannel channel; + { + NioHttpChannel innerChannel; + try { + innerChannel = new NioHttpChannel(nioChannel, transport.getBigArrays(), httpRequest, settings, threadContext); + } catch (final IllegalArgumentException e) { + if (badRequestCause == null) { + badRequestCause = e; + } else { + badRequestCause.addSuppressed(e); + } + final NioHttpRequest innerRequest = + new NioHttpRequest( + xContentRegistry, + Collections.emptyMap(), // we are going to dispatch the request as a bad request, drop all parameters + copiedRequest.uri(), + copiedRequest); + innerChannel = new NioHttpChannel(nioChannel, transport.getBigArrays(), innerRequest, settings, threadContext); + } + channel = innerChannel; + } + + if (request.decoderResult().isFailure()) { + transport.dispatchBadRequest(httpRequest, channel, request.decoderResult().cause()); + } else if (badRequestCause != null) { + transport.dispatchBadRequest(httpRequest, channel, badRequestCause); + } else { + transport.dispatchRequest(httpRequest, channel); + } + } + + private NioHttpRequest requestWithoutContentTypeHeader(final FullHttpRequest request, final Exception badRequestCause) { + final HttpHeaders headersWithoutContentTypeHeader = new DefaultHttpHeaders(); + headersWithoutContentTypeHeader.add(request.headers()); + headersWithoutContentTypeHeader.remove("Content-Type"); + final FullHttpRequest requestWithoutContentTypeHeader = + new DefaultFullHttpRequest( + request.protocolVersion(), + request.method(), + request.uri(), + request.content(), + headersWithoutContentTypeHeader, // remove the Content-Type header so as to not parse it again + request.trailingHeaders()); // Content-Type can not be a trailing header + try { + return new NioHttpRequest(xContentRegistry, requestWithoutContentTypeHeader); + } catch (final RestRequest.BadParameterException e) { + badRequestCause.addSuppressed(e); + return requestWithoutParameters(requestWithoutContentTypeHeader); + } + } + + private NioHttpRequest requestWithoutParameters(final FullHttpRequest request) { + // remove all parameters as at least one is incorrectly encoded + return new NioHttpRequest(xContentRegistry, Collections.emptyMap(), request.uri(), request); + } +} diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpWriteOperation.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpWriteOperation.java new file mode 100644 index 0000000000000..c838ae85e9d40 --- /dev/null +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpWriteOperation.java @@ -0,0 +1,54 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
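Editor's note: HttpWriteOperation (this file) is a thin carrier tying a FullHttpResponse to its SocketChannelContext and completion listener. The write path implied by the handler above runs createWriteOperation, then writeToBytes, which pushes the response through the embedded netty pipeline and drains the encoded bytes as FlushOperations. A sketch, where handler, context, and response are assumptions for illustration and the stripped generic on the listener is presumably BiConsumer<Void, Throwable>:

import java.util.List;
import java.util.function.BiConsumer;

import io.netty.handler.codec.http.FullHttpResponse;
import org.elasticsearch.nio.FlushOperation;
import org.elasticsearch.nio.SocketChannelContext;
import org.elasticsearch.nio.WriteOperation;

class WritePathSketch {
    static List<FlushOperation> send(HttpReadWriteHandler handler, SocketChannelContext context,
                                     FullHttpResponse response) {
        BiConsumer<Void, Throwable> listener = (aVoid, e) -> {};
        WriteOperation op = handler.createWriteOperation(context, response, listener);
        // returns the HTTP-encoded bytes plus the listener, ready for the nio
        // event loop to flush to the socket
        return handler.writeToBytes(op);
    }
}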
+ */ + +package org.elasticsearch.http.nio; + +import io.netty.handler.codec.http.FullHttpResponse; +import org.elasticsearch.nio.SocketChannelContext; +import org.elasticsearch.nio.WriteOperation; + +import java.util.function.BiConsumer; + +public class HttpWriteOperation implements WriteOperation { + + private final SocketChannelContext channelContext; + private final FullHttpResponse response; + private final BiConsumer listener; + + HttpWriteOperation(SocketChannelContext channelContext, FullHttpResponse response, BiConsumer listener) { + this.channelContext = channelContext; + this.response = response; + this.listener = listener; + } + + @Override + public BiConsumer getListener() { + return listener; + } + + @Override + public SocketChannelContext getChannel() { + return channelContext; + } + + @Override + public FullHttpResponse getObject() { + return response; + } +} diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyAdaptor.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyAdaptor.java new file mode 100644 index 0000000000000..3344a31264121 --- /dev/null +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyAdaptor.java @@ -0,0 +1,131 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.nio; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelOutboundHandlerAdapter; +import io.netty.channel.ChannelPromise; +import io.netty.channel.embedded.EmbeddedChannel; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.nio.FlushOperation; +import org.elasticsearch.nio.WriteOperation; + +import java.nio.ByteBuffer; +import java.util.LinkedList; +import java.util.function.BiConsumer; + +public class NettyAdaptor implements AutoCloseable { + + private final EmbeddedChannel nettyChannel; + private final LinkedList flushOperations = new LinkedList<>(); + + NettyAdaptor(ChannelHandler... handlers) { + nettyChannel = new EmbeddedChannel(); + nettyChannel.pipeline().addLast("write_captor", new ChannelOutboundHandlerAdapter() { + + @Override + public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) { + // This is a little tricky. The embedded channel will complete the promise once it writes the message + // to its outbound buffer. We do not want to complete the promise until the message is sent. So we + // intercept the promise and pass a different promise back to the rest of the pipeline. 
+ + try { + ByteBuf message = (ByteBuf) msg; + promise.addListener((f) -> message.release()); + NettyListener listener; + if (promise instanceof NettyListener) { + listener = (NettyListener) promise; + } else { + listener = new NettyListener(promise); + } + flushOperations.add(new FlushOperation(message.nioBuffers(), listener)); + } catch (Exception e) { + promise.setFailure(e); + } + } + }); + nettyChannel.pipeline().addLast(handlers); + } + + @Override + public void close() throws Exception { + assert flushOperations.isEmpty() : "Should close outbound operations before calling close"; + + ChannelFuture closeFuture = nettyChannel.close(); + // This should be safe as we are not a real network channel + closeFuture.await(); + if (closeFuture.isSuccess() == false) { + Throwable cause = closeFuture.cause(); + ExceptionsHelper.dieOnError(cause); + throw (Exception) cause; + } + } + + public void addCloseListener(BiConsumer listener) { + nettyChannel.closeFuture().addListener(f -> { + if (f.isSuccess()) { + listener.accept(null, null); + } else { + final Throwable cause = f.cause(); + ExceptionsHelper.dieOnError(cause); + assert cause instanceof Exception; + listener.accept(null, (Exception) cause); + } + }); + } + + public int read(ByteBuffer[] buffers) { + ByteBuf byteBuf = Unpooled.wrappedBuffer(buffers); + int initialReaderIndex = byteBuf.readerIndex(); + nettyChannel.writeInbound(byteBuf); + return byteBuf.readerIndex() - initialReaderIndex; + } + + public Object pollInboundMessage() { + return nettyChannel.readInbound(); + } + + public void write(WriteOperation writeOperation) { + ChannelPromise channelPromise = nettyChannel.newPromise(); + channelPromise.addListener(f -> { + BiConsumer consumer = writeOperation.getListener(); + if (f.cause() == null) { + consumer.accept(null, null); + } else { + ExceptionsHelper.dieOnError(f.cause()); + consumer.accept(null, f.cause()); + } + }); + + nettyChannel.writeAndFlush(writeOperation.getObject(), new NettyListener(channelPromise)); + } + + public FlushOperation pollOutboundOperation() { + return flushOperations.pollFirst(); + } + + public int getOutboundCount() { + return flushOperations.size(); + } +} diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyListener.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyListener.java new file mode 100644 index 0000000000000..e806b0d23ce3a --- /dev/null +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyListener.java @@ -0,0 +1,214 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
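Editor's note: the write captor above hands each captured message to a FlushOperation paired with a NettyListener (defined next). The same object is both a ChannelPromise for the netty pipeline and a completion callback for the nio layer, so completing one side completes the other. An illustrative sketch, assuming same-package access since the constructor is package-private:

import io.netty.channel.ChannelPromise;
import io.netty.channel.embedded.EmbeddedChannel;

class NettyListenerSketch {
    static void demo() {
        EmbeddedChannel channel = new EmbeddedChannel();
        ChannelPromise promise = channel.newPromise();
        NettyListener listener = new NettyListener(promise);
        listener.accept(null, null); // completes the wrapped promise successfully
        assert promise.isSuccess();
    }
}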
+ */ + +package org.elasticsearch.http.nio; + +import io.netty.channel.Channel; +import io.netty.channel.ChannelPromise; +import io.netty.util.concurrent.Future; +import io.netty.util.concurrent.GenericFutureListener; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.util.concurrent.FutureUtils; + +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.function.BiConsumer; + +/** + * This is an {@link BiConsumer} that interfaces with netty code. It wraps a netty promise and will + * complete that promise when accept is called. It delegates the normal promise methods to the underlying + * promise. + */ +public class NettyListener implements BiConsumer, ChannelPromise { + + private final ChannelPromise promise; + + NettyListener(ChannelPromise promise) { + this.promise = promise; + } + + @Override + public void accept(Void v, Throwable throwable) { + if (throwable == null) { + promise.setSuccess(); + } else { + promise.setFailure(throwable); + } + } + + @Override + public Channel channel() { + return promise.channel(); + } + + @Override + public ChannelPromise setSuccess(Void result) { + return promise.setSuccess(result); + } + + @Override + public boolean trySuccess(Void result) { + return promise.trySuccess(result); + } + + @Override + public ChannelPromise setSuccess() { + return promise.setSuccess(); + } + + @Override + public boolean trySuccess() { + return promise.trySuccess(); + } + + @Override + public ChannelPromise setFailure(Throwable cause) { + return promise.setFailure(cause); + } + + @Override + public boolean tryFailure(Throwable cause) { + return promise.tryFailure(cause); + } + + @Override + public boolean setUncancellable() { + return promise.setUncancellable(); + } + + @Override + public boolean isSuccess() { + return promise.isSuccess(); + } + + @Override + public boolean isCancellable() { + return promise.isCancellable(); + } + + @Override + public Throwable cause() { + return promise.cause(); + } + + @Override + public ChannelPromise addListener(GenericFutureListener> listener) { + return promise.addListener(listener); + } + + @Override + @SafeVarargs + @SuppressWarnings("varargs") + public final ChannelPromise addListeners(GenericFutureListener>... listeners) { + return promise.addListeners(listeners); + } + + @Override + public ChannelPromise removeListener(GenericFutureListener> listener) { + return promise.removeListener(listener); + } + + @Override + @SafeVarargs + @SuppressWarnings("varargs") + public final ChannelPromise removeListeners(GenericFutureListener>... 
listeners) { + return promise.removeListeners(listeners); + } + + @Override + public ChannelPromise sync() throws InterruptedException { + return promise.sync(); + } + + @Override + public ChannelPromise syncUninterruptibly() { + return promise.syncUninterruptibly(); + } + + @Override + public ChannelPromise await() throws InterruptedException { + return promise.await(); + } + + @Override + public ChannelPromise awaitUninterruptibly() { + return promise.awaitUninterruptibly(); + } + + @Override + public boolean await(long timeout, TimeUnit unit) throws InterruptedException { + return promise.await(timeout, unit); + } + + @Override + public boolean await(long timeoutMillis) throws InterruptedException { + return promise.await(timeoutMillis); + } + + @Override + public boolean awaitUninterruptibly(long timeout, TimeUnit unit) { + return promise.awaitUninterruptibly(timeout, unit); + } + + @Override + public boolean awaitUninterruptibly(long timeoutMillis) { + return promise.awaitUninterruptibly(timeoutMillis); + } + + @Override + public Void getNow() { + return promise.getNow(); + } + + @Override + public boolean cancel(boolean mayInterruptIfRunning) { + return FutureUtils.cancel(promise); + } + + @Override + public boolean isCancelled() { + return promise.isCancelled(); + } + + @Override + public boolean isDone() { + return promise.isDone(); + } + + @Override + public Void get() throws InterruptedException, ExecutionException { + return promise.get(); + } + + @Override + public Void get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { + return promise.get(timeout, unit); + } + + @Override + public boolean isVoid() { + return promise.isVoid(); + } + + @Override + public ChannelPromise unvoid() { + return promise.unvoid(); + } +} diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpChannel.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpChannel.java new file mode 100644 index 0000000000000..672c6d5abad0e --- /dev/null +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpChannel.java @@ -0,0 +1,254 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
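Because NettyListener is simultaneously a ChannelPromise and a BiConsumer<Void, Throwable>, the same object can be handed to netty code (which completes it as a promise) and to nio code (which completes it through accept), and each side observes the other's result. A small sketch of the round trip; it assumes same-package access, since the constructor is package-private, and uses an EmbeddedChannel only to mint a promise:

    import io.netty.channel.ChannelPromise;
    import io.netty.channel.embedded.EmbeddedChannel;

    // Hypothetical same-package helper, not part of the change.
    static void demonstrateBridge() {
        EmbeddedChannel channel = new EmbeddedChannel();
        ChannelPromise promise = channel.newPromise();
        NettyListener listener = new NettyListener(promise);

        listener.accept(null, null); // nio-style success completes the netty promise
        assert promise.isSuccess();
        // The failure path mirrors it: accept(null, throwable) -> promise.setFailure(throwable).
    }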
+ */ + +package org.elasticsearch.http.nio; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.handler.codec.http.DefaultFullHttpResponse; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpHeaderValues; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpResponse; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.HttpVersion; +import io.netty.handler.codec.http.cookie.Cookie; +import io.netty.handler.codec.http.cookie.ServerCookieDecoder; +import io.netty.handler.codec.http.cookie.ServerCookieEncoder; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.http.HttpHandlingSettings; +import org.elasticsearch.nio.NioSocketChannel; +import org.elasticsearch.rest.AbstractRestChannel; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.EnumMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +public class NioHttpChannel extends AbstractRestChannel { + + private final BigArrays bigArrays; + private final ThreadContext threadContext; + private final FullHttpRequest nettyRequest; + private final NioSocketChannel nioChannel; + private final boolean resetCookies; + + NioHttpChannel(NioSocketChannel nioChannel, BigArrays bigArrays, NioHttpRequest request, + HttpHandlingSettings settings, ThreadContext threadContext) { + super(request, settings.getDetailedErrorsEnabled()); + this.nioChannel = nioChannel; + this.bigArrays = bigArrays; + this.threadContext = threadContext; + this.nettyRequest = request.getRequest(); + this.resetCookies = settings.isResetCookies(); + } + + @Override + public void sendResponse(RestResponse response) { + // if the response object was created upstream, then use it; + // otherwise, create a new one + ByteBuf buffer = ByteBufUtils.toByteBuf(response.content()); + final FullHttpResponse resp; + if (HttpMethod.HEAD.equals(nettyRequest.method())) { + resp = newResponse(Unpooled.EMPTY_BUFFER); + } else { + resp = newResponse(buffer); + } + resp.setStatus(getStatus(response.status())); + + String opaque = nettyRequest.headers().get("X-Opaque-Id"); + if (opaque != null) { + setHeaderField(resp, "X-Opaque-Id", opaque); + } + + // Add all custom headers + addCustomHeaders(resp, response.getHeaders()); + addCustomHeaders(resp, threadContext.getResponseHeaders()); + + ArrayList toClose = new ArrayList<>(3); + + boolean success = false; + try { + // If our response doesn't specify a content-type header, set one + setHeaderField(resp, HttpHeaderNames.CONTENT_TYPE.toString(), response.contentType(), false); + // If our response has no content-length, calculate and set one + setHeaderField(resp, HttpHeaderNames.CONTENT_LENGTH.toString(), String.valueOf(buffer.readableBytes()), false); + + addCookies(resp); + + BytesReference content = response.content(); + if (content instanceof Releasable) { + toClose.add((Releasable) 
content); + } + BytesStreamOutput bytesStreamOutput = bytesOutputOrNull(); + if (bytesStreamOutput instanceof ReleasableBytesStreamOutput) { + toClose.add((Releasable) bytesStreamOutput); + } + + if (isCloseConnection()) { + toClose.add(nioChannel::close); + } + + nioChannel.getContext().sendMessage(resp, (aVoid, throwable) -> { + Releasables.close(toClose); + }); + success = true; + } finally { + if (success == false) { + Releasables.close(toClose); + } + } + } + + @Override + protected BytesStreamOutput newBytesOutput() { + return new ReleasableBytesStreamOutput(bigArrays); + } + + private void setHeaderField(HttpResponse resp, String headerField, String value) { + setHeaderField(resp, headerField, value, true); + } + + private void setHeaderField(HttpResponse resp, String headerField, String value, boolean override) { + if (override || !resp.headers().contains(headerField)) { + resp.headers().add(headerField, value); + } + } + + private void addCookies(HttpResponse resp) { + if (resetCookies) { + String cookieString = nettyRequest.headers().get(HttpHeaderNames.COOKIE); + if (cookieString != null) { + Set cookies = ServerCookieDecoder.STRICT.decode(cookieString); + if (!cookies.isEmpty()) { + // Reset the cookies if necessary. + resp.headers().set(HttpHeaderNames.SET_COOKIE, ServerCookieEncoder.STRICT.encode(cookies)); + } + } + } + } + + private void addCustomHeaders(HttpResponse response, Map> customHeaders) { + if (customHeaders != null) { + for (Map.Entry> headerEntry : customHeaders.entrySet()) { + for (String headerValue : headerEntry.getValue()) { + setHeaderField(response, headerEntry.getKey(), headerValue); + } + } + } + } + + // Create a new {@link HttpResponse} to transmit the response for the netty request. + private FullHttpResponse newResponse(ByteBuf buffer) { + final boolean http10 = isHttp10(); + final boolean close = isCloseConnection(); + // Build the response object. + final HttpResponseStatus status = HttpResponseStatus.OK; // default to initialize + final FullHttpResponse response; + if (http10) { + response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_0, status, buffer); + if (!close) { + response.headers().add(HttpHeaderNames.CONNECTION, "Keep-Alive"); + } + } else { + response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, status, buffer); + } + return response; + } + + // Determine if the request protocol version is HTTP 1.0 + private boolean isHttp10() { + return nettyRequest.protocolVersion().equals(HttpVersion.HTTP_1_0); + } + + // Determine if the request connection should be closed on completion. 
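+ // HTTP/1.1 connections default to keep-alive, so only an explicit "Connection: close"
+ // header closes them; HTTP/1.0 defaults to close unless the client asked for keep-alive.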
+ private boolean isCloseConnection() { + final boolean http10 = isHttp10(); + return HttpHeaderValues.CLOSE.contentEqualsIgnoreCase(nettyRequest.headers().get(HttpHeaderNames.CONNECTION)) || + (http10 && !HttpHeaderValues.KEEP_ALIVE.contentEqualsIgnoreCase(nettyRequest.headers().get(HttpHeaderNames.CONNECTION))); + } + + private static Map MAP; + + static { + EnumMap map = new EnumMap<>(RestStatus.class); + map.put(RestStatus.CONTINUE, HttpResponseStatus.CONTINUE); + map.put(RestStatus.SWITCHING_PROTOCOLS, HttpResponseStatus.SWITCHING_PROTOCOLS); + map.put(RestStatus.OK, HttpResponseStatus.OK); + map.put(RestStatus.CREATED, HttpResponseStatus.CREATED); + map.put(RestStatus.ACCEPTED, HttpResponseStatus.ACCEPTED); + map.put(RestStatus.NON_AUTHORITATIVE_INFORMATION, HttpResponseStatus.NON_AUTHORITATIVE_INFORMATION); + map.put(RestStatus.NO_CONTENT, HttpResponseStatus.NO_CONTENT); + map.put(RestStatus.RESET_CONTENT, HttpResponseStatus.RESET_CONTENT); + map.put(RestStatus.PARTIAL_CONTENT, HttpResponseStatus.PARTIAL_CONTENT); + map.put(RestStatus.MULTI_STATUS, HttpResponseStatus.INTERNAL_SERVER_ERROR); // no status for this?? + map.put(RestStatus.MULTIPLE_CHOICES, HttpResponseStatus.MULTIPLE_CHOICES); + map.put(RestStatus.MOVED_PERMANENTLY, HttpResponseStatus.MOVED_PERMANENTLY); + map.put(RestStatus.FOUND, HttpResponseStatus.FOUND); + map.put(RestStatus.SEE_OTHER, HttpResponseStatus.SEE_OTHER); + map.put(RestStatus.NOT_MODIFIED, HttpResponseStatus.NOT_MODIFIED); + map.put(RestStatus.USE_PROXY, HttpResponseStatus.USE_PROXY); + map.put(RestStatus.TEMPORARY_REDIRECT, HttpResponseStatus.TEMPORARY_REDIRECT); + map.put(RestStatus.BAD_REQUEST, HttpResponseStatus.BAD_REQUEST); + map.put(RestStatus.UNAUTHORIZED, HttpResponseStatus.UNAUTHORIZED); + map.put(RestStatus.PAYMENT_REQUIRED, HttpResponseStatus.PAYMENT_REQUIRED); + map.put(RestStatus.FORBIDDEN, HttpResponseStatus.FORBIDDEN); + map.put(RestStatus.NOT_FOUND, HttpResponseStatus.NOT_FOUND); + map.put(RestStatus.METHOD_NOT_ALLOWED, HttpResponseStatus.METHOD_NOT_ALLOWED); + map.put(RestStatus.NOT_ACCEPTABLE, HttpResponseStatus.NOT_ACCEPTABLE); + map.put(RestStatus.PROXY_AUTHENTICATION, HttpResponseStatus.PROXY_AUTHENTICATION_REQUIRED); + map.put(RestStatus.REQUEST_TIMEOUT, HttpResponseStatus.REQUEST_TIMEOUT); + map.put(RestStatus.CONFLICT, HttpResponseStatus.CONFLICT); + map.put(RestStatus.GONE, HttpResponseStatus.GONE); + map.put(RestStatus.LENGTH_REQUIRED, HttpResponseStatus.LENGTH_REQUIRED); + map.put(RestStatus.PRECONDITION_FAILED, HttpResponseStatus.PRECONDITION_FAILED); + map.put(RestStatus.REQUEST_ENTITY_TOO_LARGE, HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE); + map.put(RestStatus.REQUEST_URI_TOO_LONG, HttpResponseStatus.REQUEST_URI_TOO_LONG); + map.put(RestStatus.UNSUPPORTED_MEDIA_TYPE, HttpResponseStatus.UNSUPPORTED_MEDIA_TYPE); + map.put(RestStatus.REQUESTED_RANGE_NOT_SATISFIED, HttpResponseStatus.REQUESTED_RANGE_NOT_SATISFIABLE); + map.put(RestStatus.EXPECTATION_FAILED, HttpResponseStatus.EXPECTATION_FAILED); + map.put(RestStatus.UNPROCESSABLE_ENTITY, HttpResponseStatus.BAD_REQUEST); + map.put(RestStatus.LOCKED, HttpResponseStatus.BAD_REQUEST); + map.put(RestStatus.FAILED_DEPENDENCY, HttpResponseStatus.BAD_REQUEST); + map.put(RestStatus.TOO_MANY_REQUESTS, HttpResponseStatus.TOO_MANY_REQUESTS); + map.put(RestStatus.INTERNAL_SERVER_ERROR, HttpResponseStatus.INTERNAL_SERVER_ERROR); + map.put(RestStatus.NOT_IMPLEMENTED, HttpResponseStatus.NOT_IMPLEMENTED); + map.put(RestStatus.BAD_GATEWAY, HttpResponseStatus.BAD_GATEWAY); + 
map.put(RestStatus.SERVICE_UNAVAILABLE, HttpResponseStatus.SERVICE_UNAVAILABLE); + map.put(RestStatus.GATEWAY_TIMEOUT, HttpResponseStatus.GATEWAY_TIMEOUT); + map.put(RestStatus.HTTP_VERSION_NOT_SUPPORTED, HttpResponseStatus.HTTP_VERSION_NOT_SUPPORTED); + MAP = Collections.unmodifiableMap(map); + } + + private static HttpResponseStatus getStatus(RestStatus status) { + return MAP.getOrDefault(status, HttpResponseStatus.INTERNAL_SERVER_ERROR); + } +} diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpRequest.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpRequest.java new file mode 100644 index 0000000000000..b5bfcc6b0cca2 --- /dev/null +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpRequest.java @@ -0,0 +1,186 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.nio; + +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.HttpHeaders; +import io.netty.handler.codec.http.HttpMethod; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.rest.RestRequest; + +import java.util.AbstractMap; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +public class NioHttpRequest extends RestRequest { + + private final FullHttpRequest request; + private final BytesReference content; + + NioHttpRequest(NamedXContentRegistry xContentRegistry, FullHttpRequest request) { + super(xContentRegistry, request.uri(), new HttpHeadersMap(request.headers())); + this.request = request; + if (request.content().isReadable()) { + this.content = ByteBufUtils.toBytesReference(request.content()); + } else { + this.content = BytesArray.EMPTY; + } + + } + + NioHttpRequest(NamedXContentRegistry xContentRegistry, Map params, String uri, FullHttpRequest request) { + super(xContentRegistry, params, uri, new HttpHeadersMap(request.headers())); + this.request = request; + if (request.content().isReadable()) { + this.content = ByteBufUtils.toBytesReference(request.content()); + } else { + this.content = BytesArray.EMPTY; + } + } + + @Override + public Method method() { + HttpMethod httpMethod = request.method(); + if (httpMethod == HttpMethod.GET) + return Method.GET; + + if (httpMethod == HttpMethod.POST) + return Method.POST; + + if (httpMethod == HttpMethod.PUT) + return Method.PUT; + + if (httpMethod == HttpMethod.DELETE) + return Method.DELETE; + + if (httpMethod == HttpMethod.HEAD) { + return Method.HEAD; + } + + if (httpMethod == HttpMethod.OPTIONS) { + return Method.OPTIONS; + } 
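+ // All remaining verbs (e.g. PATCH or TRACE) fall back to GET below.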
+ + return Method.GET; + } + + @Override + public String uri() { + return request.uri(); + } + + @Override + public boolean hasContent() { + return content.length() > 0; + } + + @Override + public BytesReference content() { + return content; + } + + public FullHttpRequest getRequest() { + return request; + } + + /** + * A wrapper of {@link HttpHeaders} that implements a map to prevent copying unnecessarily. This class does not support modifications + * and due to the underlying implementation, it performs case insensitive lookups of key to values. + * + * It is important to note that this implementation does have some downsides in that each invocation of the + * {@link #values()} and {@link #entrySet()} methods will perform a copy of the values in the HttpHeaders rather than returning a + * view of the underlying values. + */ + private static class HttpHeadersMap implements Map> { + + private final HttpHeaders httpHeaders; + + private HttpHeadersMap(HttpHeaders httpHeaders) { + this.httpHeaders = httpHeaders; + } + + @Override + public int size() { + return httpHeaders.size(); + } + + @Override + public boolean isEmpty() { + return httpHeaders.isEmpty(); + } + + @Override + public boolean containsKey(Object key) { + return key instanceof String && httpHeaders.contains((String) key); + } + + @Override + public boolean containsValue(Object value) { + return value instanceof List && httpHeaders.names().stream().map(httpHeaders::getAll).anyMatch(value::equals); + } + + @Override + public List get(Object key) { + return key instanceof String ? httpHeaders.getAll((String) key) : null; + } + + @Override + public List put(String key, List value) { + throw new UnsupportedOperationException("modifications are not supported"); + } + + @Override + public List remove(Object key) { + throw new UnsupportedOperationException("modifications are not supported"); + } + + @Override + public void putAll(Map> m) { + throw new UnsupportedOperationException("modifications are not supported"); + } + + @Override + public void clear() { + throw new UnsupportedOperationException("modifications are not supported"); + } + + @Override + public Set keySet() { + return httpHeaders.names(); + } + + @Override + public Collection> values() { + return httpHeaders.names().stream().map(k -> Collections.unmodifiableList(httpHeaders.getAll(k))).collect(Collectors.toList()); + } + + @Override + public Set>> entrySet() { + return httpHeaders.names().stream().map(k -> new AbstractMap.SimpleImmutableEntry<>(k, httpHeaders.getAll(k))) + .collect(Collectors.toSet()); + } + } +} diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java new file mode 100644 index 0000000000000..bdbee715bd0cf --- /dev/null +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java @@ -0,0 +1,322 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
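The HttpHeadersMap above keeps get() and containsKey() case-insensitive and copy-free by delegating straight to the netty HttpHeaders, while values() and entrySet() rebuild fresh collections on every call. A short sketch of the underlying behavior it relies on, using netty's DefaultHttpHeaders (illustrative, not part of the change):

    import io.netty.handler.codec.http.DefaultHttpHeaders;
    import io.netty.handler.codec.http.HttpHeaders;

    static void headerLookups() {
        HttpHeaders headers = new DefaultHttpHeaders();
        headers.add("Content-Type", "application/json");
        // netty lookups are case-insensitive, so both of these return the same values:
        assert headers.getAll("content-type").equals(headers.getAll("Content-Type"));
        // HttpHeadersMap delegates get()/containsKey() to exactly these calls; only
        // values() and entrySet() pay for a copy, once per invocation, so they should
        // not be called repeatedly in a tight loop.
    }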
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.nio; + +import io.netty.handler.timeout.ReadTimeoutException; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.NetworkExceptionHelper; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.http.BindHttpException; +import org.elasticsearch.http.HttpHandlingSettings; +import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.http.HttpStats; +import org.elasticsearch.http.netty4.AbstractHttpServerTransport; +import org.elasticsearch.nio.AcceptingSelector; +import org.elasticsearch.nio.AcceptorEventHandler; +import org.elasticsearch.nio.BytesChannelContext; +import org.elasticsearch.nio.ChannelFactory; +import org.elasticsearch.nio.InboundChannelBuffer; +import org.elasticsearch.nio.NioChannel; +import org.elasticsearch.nio.NioGroup; +import org.elasticsearch.nio.NioServerSocketChannel; +import org.elasticsearch.nio.NioSocketChannel; +import org.elasticsearch.nio.ServerChannelContext; +import org.elasticsearch.nio.SocketChannelContext; +import org.elasticsearch.nio.SocketEventHandler; +import org.elasticsearch.nio.SocketSelector; +import org.elasticsearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.nio.channels.ServerSocketChannel; +import java.nio.channels.SocketChannel; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; + +import static org.elasticsearch.common.settings.Setting.intSetting; +import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH; 
+import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_RESET_COOKIES; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_KEEP_ALIVE; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_NO_DELAY; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_REUSE_ADDRESS; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_SEND_BUFFER_SIZE; + +public class NioHttpServerTransport extends AbstractHttpServerTransport { + + public static final Setting NIO_HTTP_ACCEPTOR_COUNT = + intSetting("http.nio.acceptor_count", 1, 1, Setting.Property.NodeScope); + public static final Setting NIO_HTTP_WORKER_COUNT = + new Setting<>("http.nio.worker_count", + (s) -> Integer.toString(EsExecutors.numberOfProcessors(s) * 2), + (s) -> Setting.parseInt(s, 1, "http.nio.worker_count"), Setting.Property.NodeScope); + + private static final String TRANSPORT_WORKER_THREAD_NAME_PREFIX = "http_nio_transport_worker"; + private static final String TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX = "http_nio_transport_acceptor"; + + private final BigArrays bigArrays; + private final ThreadPool threadPool; + private final NamedXContentRegistry xContentRegistry; + + private final HttpHandlingSettings httpHandlingSettings; + + private final boolean tcpNoDelay; + private final boolean tcpKeepAlive; + private final boolean reuseAddress; + private final int tcpSendBufferSize; + private final int tcpReceiveBufferSize; + + private final Set serverChannels = Collections.newSetFromMap(new ConcurrentHashMap<>()); + private final Set socketChannels = Collections.newSetFromMap(new ConcurrentHashMap<>()); + private NioGroup nioGroup; + private HttpChannelFactory channelFactory; + + public NioHttpServerTransport(Settings settings, NetworkService networkService, BigArrays bigArrays, ThreadPool threadPool, + NamedXContentRegistry xContentRegistry, HttpServerTransport.Dispatcher dispatcher) { + super(settings, networkService, threadPool, dispatcher); + this.bigArrays = bigArrays; + this.threadPool = threadPool; + this.xContentRegistry = xContentRegistry; + + ByteSizeValue maxChunkSize = SETTING_HTTP_MAX_CHUNK_SIZE.get(settings); + ByteSizeValue maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings); + ByteSizeValue maxInitialLineLength = SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.get(settings); + this.httpHandlingSettings = new HttpHandlingSettings(Math.toIntExact(maxContentLength.getBytes()), + Math.toIntExact(maxChunkSize.getBytes()), + Math.toIntExact(maxHeaderSize.getBytes()), + Math.toIntExact(maxInitialLineLength.getBytes()), + SETTING_HTTP_RESET_COOKIES.get(settings), + SETTING_HTTP_COMPRESSION.get(settings), + SETTING_HTTP_COMPRESSION_LEVEL.get(settings), + SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings)); + + this.tcpNoDelay = SETTING_HTTP_TCP_NO_DELAY.get(settings); + this.tcpKeepAlive = SETTING_HTTP_TCP_KEEP_ALIVE.get(settings); + this.reuseAddress = SETTING_HTTP_TCP_REUSE_ADDRESS.get(settings); + this.tcpSendBufferSize = Math.toIntExact(SETTING_HTTP_TCP_SEND_BUFFER_SIZE.get(settings).getBytes()); + this.tcpReceiveBufferSize = Math.toIntExact(SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE.get(settings).getBytes()); + + + logger.debug("using max_chunk_size[{}], max_header_size[{}], max_initial_line_length[{}], max_content_length[{}]", + maxChunkSize, maxHeaderSize, maxInitialLineLength, maxContentLength); + } + + BigArrays getBigArrays() { + 
return bigArrays; + } + + @Override + protected void doStart() { + boolean success = false; + try { + int acceptorCount = NIO_HTTP_ACCEPTOR_COUNT.get(settings); + int workerCount = NIO_HTTP_WORKER_COUNT.get(settings); + nioGroup = new NioGroup(logger, daemonThreadFactory(this.settings, TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX), acceptorCount, + AcceptorEventHandler::new, daemonThreadFactory(this.settings, TRANSPORT_WORKER_THREAD_NAME_PREFIX), + workerCount, SocketEventHandler::new); + channelFactory = new HttpChannelFactory(); + this.boundAddress = createBoundHttpAddress(); + + if (logger.isInfoEnabled()) { + logger.info("{}", boundAddress); + } + + success = true; + } catch (IOException e) { + throw new ElasticsearchException(e); + } finally { + if (success == false) { + doStop(); // otherwise we leak threads since we never moved to started + } + } + } + + @Override + protected void doStop() { + synchronized (serverChannels) { + if (serverChannels.isEmpty() == false) { + try { + closeChannels(new ArrayList<>(serverChannels)); + } catch (Exception e) { + logger.error("unexpected exception while closing http server channels", e); + } + serverChannels.clear(); + } + } + + try { + closeChannels(new ArrayList<>(socketChannels)); + } catch (Exception e) { + logger.warn("unexpected exception while closing http channels", e); + } + socketChannels.clear(); + + try { + nioGroup.close(); + } catch (Exception e) { + logger.warn("unexpected exception while stopping nio group", e); + } + } + + @Override + protected void doClose() throws IOException { + } + + @Override + protected TransportAddress bindAddress(InetAddress hostAddress) { + final AtomicReference lastException = new AtomicReference<>(); + final AtomicReference boundSocket = new AtomicReference<>(); + boolean success = port.iterate(portNumber -> { + try { + synchronized (serverChannels) { + InetSocketAddress address = new InetSocketAddress(hostAddress, portNumber); + NioServerSocketChannel channel = nioGroup.bindServerChannel(address, channelFactory); + serverChannels.add(channel); + boundSocket.set(channel.getLocalAddress()); + } + } catch (Exception e) { + lastException.set(e); + return false; + } + return true; + }); + if (success == false) { + throw new BindHttpException("Failed to bind to [" + port.getPortRangeString() + "]", lastException.get()); + } + + if (logger.isDebugEnabled()) { + logger.debug("Bound http to address {{}}", NetworkAddress.format(boundSocket.get())); + } + return new TransportAddress(boundSocket.get()); + } + + @Override + public HttpStats stats() { + return new HttpStats(serverChannels.size(), socketChannels.size()); + } + + protected void exceptionCaught(NioSocketChannel channel, Exception cause) { + if (cause instanceof ReadTimeoutException) { + if (logger.isTraceEnabled()) { + logger.trace("Read timeout [{}]", channel.getRemoteAddress()); + } + channel.close(); + } else { + if (lifecycle.started() == false) { + // ignore + return; + } + if (NetworkExceptionHelper.isCloseConnectionException(cause) == false) { + logger.warn( + (Supplier) () -> new ParameterizedMessage( + "caught exception while handling client http traffic, closing connection {}", channel), + cause); + channel.close(); + } else { + logger.debug( + (Supplier) () -> new ParameterizedMessage( + "caught exception while handling client http traffic, closing connection {}", channel), + cause); + channel.close(); + } + } + } + + private void closeChannels(List channels) { + List> futures = new ArrayList<>(channels.size()); + + for (NioChannel channel : 
channels) { + PlainActionFuture future = PlainActionFuture.newFuture(); + channel.addCloseListener(ActionListener.toBiConsumer(future)); + futures.add(future); + channel.close(); + } + + List closeExceptions = new ArrayList<>(); + for (ActionFuture f : futures) { + try { + f.actionGet(); + } catch (RuntimeException e) { + closeExceptions.add(e); + } + } + + ExceptionsHelper.rethrowAndSuppress(closeExceptions); + } + + private void acceptChannel(NioSocketChannel socketChannel) { + socketChannels.add(socketChannel); + } + + private class HttpChannelFactory extends ChannelFactory { + + private HttpChannelFactory() { + super(new RawChannelFactory(tcpNoDelay, tcpKeepAlive, reuseAddress, tcpSendBufferSize, tcpReceiveBufferSize)); + } + + @Override + public NioSocketChannel createChannel(SocketSelector selector, SocketChannel channel) throws IOException { + NioSocketChannel nioChannel = new NioSocketChannel(channel); + HttpReadWriteHandler httpReadWritePipeline = new HttpReadWriteHandler(nioChannel,NioHttpServerTransport.this, + httpHandlingSettings, xContentRegistry, threadPool.getThreadContext()); + Consumer exceptionHandler = (e) -> exceptionCaught(nioChannel, e); + SocketChannelContext context = new BytesChannelContext(nioChannel, selector, exceptionHandler, httpReadWritePipeline, + InboundChannelBuffer.allocatingInstance()); + nioChannel.setContext(context); + return nioChannel; + } + + @Override + public NioServerSocketChannel createServerChannel(AcceptingSelector selector, ServerSocketChannel channel) throws IOException { + NioServerSocketChannel nioChannel = new NioServerSocketChannel(channel); + ServerChannelContext context = new ServerChannelContext(nioChannel, this, selector, NioHttpServerTransport.this::acceptChannel, + (e) -> {}); + nioChannel.setContext(context); + return nioChannel; + } + + } +} diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java index eb3d7f3d710dc..9d794f951c8d2 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java @@ -21,7 +21,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.recycler.Recycler; @@ -39,7 +38,6 @@ import org.elasticsearch.nio.NioGroup; import org.elasticsearch.nio.NioSocketChannel; import org.elasticsearch.nio.ServerChannelContext; -import org.elasticsearch.nio.SocketChannelContext; import org.elasticsearch.nio.SocketEventHandler; import org.elasticsearch.nio.SocketSelector; import org.elasticsearch.threadpool.ThreadPool; @@ -184,10 +182,9 @@ public TcpNioSocketChannel createChannel(SocketSelector selector, SocketChannel Recycler.V bytes = pageCacheRecycler.bytePage(false); return new InboundChannelBuffer.Page(ByteBuffer.wrap(bytes.v()), bytes::close); }; - SocketChannelContext.ReadConsumer nioReadConsumer = channelBuffer -> - consumeNetworkReads(nioChannel, BytesReference.fromByteBuffers(channelBuffer.sliceBuffersTo(channelBuffer.getIndex()))); + TcpReadWriteHandler readWriteHandler = new TcpReadWriteHandler(nioChannel, NioTransport.this); Consumer exceptionHandler = (e) -> exceptionCaught(nioChannel, e); - 
BytesChannelContext context = new BytesChannelContext(nioChannel, selector, exceptionHandler, nioReadConsumer, + BytesChannelContext context = new BytesChannelContext(nioChannel, selector, exceptionHandler, readWriteHandler, new InboundChannelBuffer(pageSupplier)); nioChannel.setContext(context); return nioChannel; diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java index 029507a5ba49d..422e3e9b83330 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java @@ -19,14 +19,15 @@ package org.elasticsearch.transport.nio; -import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.http.nio.NioHttpServerTransport; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.plugins.NetworkPlugin; import org.elasticsearch.plugins.Plugin; @@ -42,10 +43,13 @@ public class NioTransportPlugin extends Plugin implements NetworkPlugin { public static final String NIO_TRANSPORT_NAME = "nio-transport"; + public static final String NIO_HTTP_TRANSPORT_NAME = "nio-http-transport"; @Override public List> getSettings() { return Arrays.asList( + NioHttpServerTransport.NIO_HTTP_ACCEPTOR_COUNT, + NioHttpServerTransport.NIO_HTTP_WORKER_COUNT, NioTransport.NIO_WORKER_COUNT, NioTransport.NIO_ACCEPTOR_COUNT ); @@ -61,4 +65,15 @@ public Map> getTransports(Settings settings, ThreadP () -> new NioTransport(settings, threadPool, networkService, bigArrays, pageCacheRecycler, namedWriteableRegistry, circuitBreakerService)); } + + @Override + public Map> getHttpTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays, + CircuitBreakerService circuitBreakerService, + NamedWriteableRegistry namedWriteableRegistry, + NamedXContentRegistry xContentRegistry, + NetworkService networkService, + HttpServerTransport.Dispatcher dispatcher) { + return Collections.singletonMap(NIO_HTTP_TRANSPORT_NAME, + () -> new NioHttpServerTransport(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher)); + } } diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpReadWriteHandler.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpReadWriteHandler.java new file mode 100644 index 0000000000000..f2d07b180855c --- /dev/null +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpReadWriteHandler.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
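With both suppliers registered, a node opts into the nio implementations by name. A minimal sketch, assuming the standard "transport.type" and "http.type" keys by which NetworkPlugin-provided transports are selected:

    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.transport.nio.NioTransportPlugin;

    // Sketch: select both nio implementations for a node; the key strings are an
    // assumption based on the standard NetworkModule settings.
    static Settings nioSettings() {
        return Settings.builder()
                .put("transport.type", NioTransportPlugin.NIO_TRANSPORT_NAME)    // "nio-transport"
                .put("http.type", NioTransportPlugin.NIO_HTTP_TRANSPORT_NAME)    // "nio-http-transport"
                .build();
    }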
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.nio; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.nio.BytesWriteHandler; +import org.elasticsearch.nio.InboundChannelBuffer; +import org.elasticsearch.transport.TcpTransport; + +import java.io.IOException; + +public class TcpReadWriteHandler extends BytesWriteHandler { + + private final TcpNioSocketChannel channel; + private final TcpTransport transport; + + public TcpReadWriteHandler(TcpNioSocketChannel channel, TcpTransport transport) { + this.channel = channel; + this.transport = transport; + } + + @Override + public int consumeReads(InboundChannelBuffer channelBuffer) throws IOException { + BytesReference bytesReference = BytesReference.fromByteBuffers(channelBuffer.sliceBuffersTo(channelBuffer.getIndex())); + return transport.consumeNetworkReads(channel, bytesReference); + } +} diff --git a/plugins/transport-nio/src/main/plugin-metadata/plugin-security.policy b/plugins/transport-nio/src/main/plugin-metadata/plugin-security.policy index 2dbe07bd8a5c6..8c8fe7c327412 100644 --- a/plugins/transport-nio/src/main/plugin-metadata/plugin-security.policy +++ b/plugins/transport-nio/src/main/plugin-metadata/plugin-security.policy @@ -21,3 +21,9 @@ grant codeBase "${codebase.elasticsearch-nio}" { // elasticsearch-nio makes and accepts socket connections permission java.net.SocketPermission "*", "accept,connect"; }; + +grant codeBase "${codebase.netty-common}" { + // This should only currently be required as we use the netty http client for tests + // netty makes and accepts socket connections + permission java.net.SocketPermission "*", "accept,connect"; +}; diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java new file mode 100644 index 0000000000000..dce8319d2fc82 --- /dev/null +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java @@ -0,0 +1,241 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
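TcpReadWriteHandler above relies on the BytesWriteHandler contract: consumeReads reports how many bytes it actually consumed, and the owning channel context, not the handler, advances and releases the buffer. A sketch of that loop; release(long) on InboundChannelBuffer is an assumption based on how the buffer is used in this change:

    import org.elasticsearch.nio.InboundChannelBuffer;

    // Sketch of the context/handler contract, simplified.
    static void consumeLoop(TcpReadWriteHandler handler, InboundChannelBuffer channelBuffer,
                            int bytesRead) throws java.io.IOException {
        channelBuffer.incrementIndex(bytesRead);                  // selector recorded a socket read
        int bytesConsumed = handler.consumeReads(channelBuffer);  // handler reports what it used
        channelBuffer.release(bytesConsumed);                     // consumed pages go back for recycling
    }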
+ */ + +package org.elasticsearch.http.nio; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.DefaultFullHttpResponse; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpRequest; +import io.netty.handler.codec.http.HttpRequestEncoder; +import io.netty.handler.codec.http.HttpResponse; +import io.netty.handler.codec.http.HttpResponseDecoder; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.HttpUtil; +import io.netty.handler.codec.http.HttpVersion; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.http.HttpHandlingSettings; +import org.elasticsearch.nio.FlushOperation; +import org.elasticsearch.nio.InboundChannelBuffer; +import org.elasticsearch.nio.NioSocketChannel; +import org.elasticsearch.nio.SocketChannelContext; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; +import org.mockito.ArgumentCaptor; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.List; +import java.util.function.BiConsumer; + +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_RESET_COOKIES; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyZeroInteractions; + +public class HttpReadWriteHandlerTests extends ESTestCase { + + private HttpReadWriteHandler handler; + private NioSocketChannel nioSocketChannel; + private NioHttpServerTransport transport; + + private final RequestEncoder requestEncoder = new RequestEncoder(); + private final ResponseDecoder responseDecoder = new ResponseDecoder(); + + @Before + @SuppressWarnings("unchecked") + public void setMocks() { + transport = mock(NioHttpServerTransport.class); + Settings settings = Settings.EMPTY; + ByteSizeValue maxChunkSize = SETTING_HTTP_MAX_CHUNK_SIZE.getDefault(settings); + ByteSizeValue maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.getDefault(settings); + ByteSizeValue maxInitialLineLength = SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.getDefault(settings); + HttpHandlingSettings httpHandlingSettings = new HttpHandlingSettings(1024, + Math.toIntExact(maxChunkSize.getBytes()), + Math.toIntExact(maxHeaderSize.getBytes()), + Math.toIntExact(maxInitialLineLength.getBytes()), + SETTING_HTTP_RESET_COOKIES.getDefault(settings), + SETTING_HTTP_COMPRESSION.getDefault(settings), + 
SETTING_HTTP_COMPRESSION_LEVEL.getDefault(settings), + SETTING_HTTP_DETAILED_ERRORS_ENABLED.getDefault(settings)); + ThreadContext threadContext = new ThreadContext(settings); + nioSocketChannel = mock(NioSocketChannel.class); + handler = new HttpReadWriteHandler(nioSocketChannel, transport, httpHandlingSettings, NamedXContentRegistry.EMPTY, threadContext); + } + + public void testSuccessfulDecodeHttpRequest() throws IOException { + String uri = "localhost:9090/" + randomAlphaOfLength(8); + HttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, uri); + + ByteBuf buf = requestEncoder.encode(httpRequest); + int slicePoint = randomInt(buf.writerIndex() - 1); + + ByteBuf slicedBuf = buf.retainedSlice(0, slicePoint); + ByteBuf slicedBuf2 = buf.retainedSlice(slicePoint, buf.writerIndex()); + handler.consumeReads(toChannelBuffer(slicedBuf)); + + verify(transport, times(0)).dispatchRequest(any(RestRequest.class), any(RestChannel.class)); + + handler.consumeReads(toChannelBuffer(slicedBuf2)); + + ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(RestRequest.class); + verify(transport).dispatchRequest(requestCaptor.capture(), any(RestChannel.class)); + + NioHttpRequest nioHttpRequest = (NioHttpRequest) requestCaptor.getValue(); + FullHttpRequest nettyHttpRequest = nioHttpRequest.getRequest(); + assertEquals(httpRequest.protocolVersion(), nettyHttpRequest.protocolVersion()); + assertEquals(httpRequest.method(), nettyHttpRequest.method()); + } + + public void testDecodeHttpRequestError() throws IOException { + String uri = "localhost:9090/" + randomAlphaOfLength(8); + HttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, uri); + + ByteBuf buf = requestEncoder.encode(httpRequest); + buf.setByte(0, ' '); + buf.setByte(1, ' '); + buf.setByte(2, ' '); + + handler.consumeReads(toChannelBuffer(buf)); + + ArgumentCaptor exceptionCaptor = ArgumentCaptor.forClass(Throwable.class); + verify(transport).dispatchBadRequest(any(RestRequest.class), any(RestChannel.class), exceptionCaptor.capture()); + + assertTrue(exceptionCaptor.getValue() instanceof IllegalArgumentException); + } + + public void testDecodeHttpRequestContentLengthToLongGeneratesOutboundMessage() throws IOException { + String uri = "localhost:9090/" + randomAlphaOfLength(8); + HttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, uri, false); + HttpUtil.setContentLength(httpRequest, 1025); + HttpUtil.setKeepAlive(httpRequest, false); + + ByteBuf buf = requestEncoder.encode(httpRequest); + + handler.consumeReads(toChannelBuffer(buf)); + + verifyZeroInteractions(transport); + + List flushOperations = handler.pollFlushOperations(); + assertFalse(flushOperations.isEmpty()); + + FlushOperation flushOperation = flushOperations.get(0); + HttpResponse response = responseDecoder.decode(Unpooled.wrappedBuffer(flushOperation.getBuffersToWrite())); + assertEquals(HttpVersion.HTTP_1_1, response.protocolVersion()); + assertEquals(HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE, response.status()); + + flushOperation.getListener().accept(null, null); + // Since we have keep-alive set to false, we should close the channel after the response has been + // flushed + verify(nioSocketChannel).close(); + } + + @SuppressWarnings("unchecked") + public void testEncodeHttpResponse() throws IOException { + prepareHandlerForResponse(handler); + + FullHttpResponse fullHttpResponse = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); + + 
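+ // The write side runs back through the embedded netty pipeline: writeToBytes below
+ // encodes the FullHttpResponse into the FlushOperations a real channel would flush.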
SocketChannelContext context = mock(SocketChannelContext.class); + HttpWriteOperation writeOperation = new HttpWriteOperation(context, fullHttpResponse, mock(BiConsumer.class)); + List flushOperations = handler.writeToBytes(writeOperation); + + HttpResponse response = responseDecoder.decode(Unpooled.wrappedBuffer(flushOperations.get(0).getBuffersToWrite())); + + assertEquals(HttpResponseStatus.OK, response.status()); + assertEquals(HttpVersion.HTTP_1_1, response.protocolVersion()); + } + + private FullHttpRequest prepareHandlerForResponse(HttpReadWriteHandler adaptor) throws IOException { + HttpMethod method = HttpMethod.GET; + HttpVersion version = HttpVersion.HTTP_1_1; + String uri = "http://localhost:9090/" + randomAlphaOfLength(8); + + HttpRequest request = new DefaultFullHttpRequest(version, method, uri); + ByteBuf buf = requestEncoder.encode(request); + + handler.consumeReads(toChannelBuffer(buf)); + + ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(RestRequest.class); + verify(transport).dispatchRequest(requestCaptor.capture(), any(RestChannel.class)); + + NioHttpRequest nioHttpRequest = (NioHttpRequest) requestCaptor.getValue(); + FullHttpRequest requestParsed = nioHttpRequest.getRequest(); + assertNotNull(requestParsed); + assertEquals(requestParsed.method(), method); + assertEquals(requestParsed.protocolVersion(), version); + assertEquals(requestParsed.uri(), uri); + return requestParsed; + } + + private InboundChannelBuffer toChannelBuffer(ByteBuf buf) { + InboundChannelBuffer buffer = InboundChannelBuffer.allocatingInstance(); + int readableBytes = buf.readableBytes(); + buffer.ensureCapacity(readableBytes); + int bytesWritten = 0; + ByteBuffer[] byteBuffers = buffer.sliceBuffersTo(readableBytes); + int i = 0; + while (bytesWritten != readableBytes) { + ByteBuffer byteBuffer = byteBuffers[i++]; + int initialRemaining = byteBuffer.remaining(); + buf.readBytes(byteBuffer); + bytesWritten += initialRemaining - byteBuffer.remaining(); + } + buffer.incrementIndex(bytesWritten); + return buffer; + } + + private static class RequestEncoder { + + private final EmbeddedChannel requestEncoder = new EmbeddedChannel(new HttpRequestEncoder()); + + private ByteBuf encode(HttpRequest httpRequest) { + requestEncoder.writeOutbound(httpRequest); + return requestEncoder.readOutbound(); + } + } + + private static class ResponseDecoder { + + private final EmbeddedChannel responseDecoder = new EmbeddedChannel(new HttpResponseDecoder()); + + private HttpResponse decode(ByteBuf response) { + responseDecoder.writeInbound(response); + return responseDecoder.readInbound(); + } + } +} diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/Netty4HttpClient.java b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/Netty4HttpClient.java new file mode 100644 index 0000000000000..32f294f47ce9c --- /dev/null +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/Netty4HttpClient.java @@ -0,0 +1,200 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
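The Netty4HttpClient that follows is a test-only helper: it bootstraps a plain netty client so the nio server transport can be exercised over real sockets. Typical use in a test, assuming remoteAddress is the server's bound address (hypothetical snippet, not part of the change):

    import io.netty.handler.codec.http.FullHttpResponse;
    import java.net.SocketAddress;
    import java.util.Collection;

    static void queryServer(SocketAddress remoteAddress) throws InterruptedException {
        try (Netty4HttpClient client = new Netty4HttpClient()) {
            Collection<FullHttpResponse> responses = client.get(remoteAddress, "/", "/_nodes");
            // each response echoes the X-Opaque-Id header that was set on its request
            Collection<String> opaqueIds = Netty4HttpClient.returnOpaqueIds(responses);
        }
    }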
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.nio; + +import io.netty.bootstrap.Bootstrap; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.SimpleChannelInboundHandler; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.SocketChannel; +import io.netty.channel.socket.nio.NioSocketChannel; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpObject; +import io.netty.handler.codec.http.HttpObjectAggregator; +import io.netty.handler.codec.http.HttpRequest; +import io.netty.handler.codec.http.HttpRequestEncoder; +import io.netty.handler.codec.http.HttpResponse; +import io.netty.handler.codec.http.HttpResponseDecoder; +import io.netty.handler.codec.http.HttpVersion; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; + +import java.io.Closeable; +import java.net.SocketAddress; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static io.netty.handler.codec.http.HttpHeaderNames.HOST; +import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; + +/** + * Tiny helper to send http requests over netty. + */ +class Netty4HttpClient implements Closeable { + + static Collection returnHttpResponseBodies(Collection responses) { + List list = new ArrayList<>(responses.size()); + for (FullHttpResponse response : responses) { + list.add(response.content().toString(StandardCharsets.UTF_8)); + } + return list; + } + + static Collection returnOpaqueIds(Collection responses) { + List list = new ArrayList<>(responses.size()); + for (HttpResponse response : responses) { + list.add(response.headers().get("X-Opaque-Id")); + } + return list; + } + + private final Bootstrap clientBootstrap; + + Netty4HttpClient() { + clientBootstrap = new Bootstrap().channel(NioSocketChannel.class).group(new NioEventLoopGroup()); + } + + public Collection get(SocketAddress remoteAddress, String... uris) throws InterruptedException { + Collection requests = new ArrayList<>(uris.length); + for (int i = 0; i < uris.length; i++) { + final HttpRequest httpRequest = new DefaultFullHttpRequest(HTTP_1_1, HttpMethod.GET, uris[i]); + httpRequest.headers().add(HOST, "localhost"); + httpRequest.headers().add("X-Opaque-ID", String.valueOf(i)); + requests.add(httpRequest); + } + return sendRequests(remoteAddress, requests); + } + + @SafeVarargs // Safe not because it doesn't do anything with the type parameters but because it won't leak them into other methods. 
+ public final Collection post(SocketAddress remoteAddress, Tuple... urisAndBodies) + throws InterruptedException { + return processRequestsWithBody(HttpMethod.POST, remoteAddress, urisAndBodies); + } + + public final FullHttpResponse post(SocketAddress remoteAddress, FullHttpRequest httpRequest) throws InterruptedException { + Collection responses = sendRequests(remoteAddress, Collections.singleton(httpRequest)); + assert responses.size() == 1 : "expected 1 and only 1 http response"; + return responses.iterator().next(); + } + + @SafeVarargs // Safe not because it doesn't do anything with the type parameters but because it won't leak them into other methods. + public final Collection put(SocketAddress remoteAddress, Tuple... urisAndBodies) + throws InterruptedException { + return processRequestsWithBody(HttpMethod.PUT, remoteAddress, urisAndBodies); + } + + private Collection processRequestsWithBody(HttpMethod method, SocketAddress remoteAddress, Tuple... urisAndBodies) throws InterruptedException { + Collection requests = new ArrayList<>(urisAndBodies.length); + for (Tuple uriAndBody : urisAndBodies) { + ByteBuf content = Unpooled.copiedBuffer(uriAndBody.v2(), StandardCharsets.UTF_8); + HttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, method, uriAndBody.v1(), content); + request.headers().add(HttpHeaderNames.HOST, "localhost"); + request.headers().add(HttpHeaderNames.CONTENT_LENGTH, content.readableBytes()); + request.headers().add(HttpHeaderNames.CONTENT_TYPE, "application/json"); + requests.add(request); + } + return sendRequests(remoteAddress, requests); + } + + private synchronized Collection sendRequests( + final SocketAddress remoteAddress, + final Collection requests) throws InterruptedException { + final CountDownLatch latch = new CountDownLatch(requests.size()); + final Collection content = Collections.synchronizedList(new ArrayList<>(requests.size())); + + clientBootstrap.handler(new CountDownLatchHandler(latch, content)); + + ChannelFuture channelFuture = null; + try { + channelFuture = clientBootstrap.connect(remoteAddress); + channelFuture.sync(); + + for (HttpRequest request : requests) { + channelFuture.channel().writeAndFlush(request); + } + latch.await(30, TimeUnit.SECONDS); + + } finally { + if (channelFuture != null) { + channelFuture.channel().close().sync(); + } + } + + return content; + } + + @Override + public void close() { + clientBootstrap.config().group().shutdownGracefully().awaitUninterruptibly(); + } + + /** + * helper factory which adds returned data to a list and uses a count down latch to decide when done + */ + private static class CountDownLatchHandler extends ChannelInitializer { + + private final CountDownLatch latch; + private final Collection content; + + CountDownLatchHandler(final CountDownLatch latch, final Collection content) { + this.latch = latch; + this.content = content; + } + + @Override + protected void initChannel(SocketChannel ch) throws Exception { + final int maxContentLength = new ByteSizeValue(100, ByteSizeUnit.MB).bytesAsInt(); + ch.pipeline().addLast(new HttpResponseDecoder()); + ch.pipeline().addLast(new HttpRequestEncoder()); + ch.pipeline().addLast(new HttpObjectAggregator(maxContentLength)); + ch.pipeline().addLast(new SimpleChannelInboundHandler() { + @Override + protected void channelRead0(ChannelHandlerContext ctx, HttpObject msg) throws Exception { + final FullHttpResponse response = (FullHttpResponse) msg; + content.add(response.copy()); + latch.countDown(); + } + + @Override + public void 
exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { + super.exceptionCaught(ctx, cause); + latch.countDown(); + } + }); + } + + } + +} diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NettyAdaptorTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NettyAdaptorTests.java new file mode 100644 index 0000000000000..d6944a5f510e2 --- /dev/null +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NettyAdaptorTests.java @@ -0,0 +1,177 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.nio; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; +import io.netty.channel.ChannelOutboundHandlerAdapter; +import io.netty.channel.ChannelPromise; +import io.netty.channel.SimpleChannelInboundHandler; +import org.elasticsearch.nio.FlushOperation; +import org.elasticsearch.test.ESTestCase; + +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.Locale; +import java.util.concurrent.atomic.AtomicBoolean; + +public class NettyAdaptorTests extends ESTestCase { + + public void testBasicRead() { + TenIntsToStringsHandler handler = new TenIntsToStringsHandler(); + NettyAdaptor nettyAdaptor = new NettyAdaptor(handler); + ByteBuffer message = ByteBuffer.allocate(40); + for (int i = 0; i < 10; ++i) { + message.putInt(i); + } + message.flip(); + ByteBuffer[] buffers = {message}; + assertEquals(40, nettyAdaptor.read(buffers)); + assertEquals("0123456789", handler.result); + } + + public void testBasicReadWithExcessData() { + TenIntsToStringsHandler handler = new TenIntsToStringsHandler(); + NettyAdaptor nettyAdaptor = new NettyAdaptor(handler); + ByteBuffer message = ByteBuffer.allocate(52); + for (int i = 0; i < 13; ++i) { + message.putInt(i); + } + message.flip(); + ByteBuffer[] buffers = {message}; + assertEquals(40, nettyAdaptor.read(buffers)); + assertEquals("0123456789", handler.result); + } + + public void testUncaughtReadExceptionsBubbleUp() { + NettyAdaptor nettyAdaptor = new NettyAdaptor(new TenIntsToStringsHandler()); + ByteBuffer message = ByteBuffer.allocate(40); + for (int i = 0; i < 9; ++i) { + message.putInt(i); + } + message.flip(); + ByteBuffer[] buffers = {message}; + expectThrows(IllegalStateException.class, () -> nettyAdaptor.read(buffers)); + } + + public void testWriteInsidePipelineIsCaptured() { + TenIntsToStringsHandler tenIntsToStringsHandler = new TenIntsToStringsHandler(); + PromiseCheckerHandler promiseCheckerHandler = new PromiseCheckerHandler(); + NettyAdaptor nettyAdaptor = new NettyAdaptor(new CapitalizeWriteHandler(), + promiseCheckerHandler, + new 
WriteInMiddleHandler(),
+            tenIntsToStringsHandler);
+        byte[] bytes = "SHOULD_WRITE".getBytes(StandardCharsets.UTF_8);
+        ByteBuffer message = ByteBuffer.wrap(bytes);
+        ByteBuffer[] buffers = {message};
+        assertNull(nettyAdaptor.pollOutboundOperation());
+        nettyAdaptor.read(buffers);
+        assertFalse(tenIntsToStringsHandler.wasCalled);
+        FlushOperation flushOperation = nettyAdaptor.pollOutboundOperation();
+        assertNotNull(flushOperation);
+        assertEquals("FAILED", Unpooled.wrappedBuffer(flushOperation.getBuffersToWrite()).toString(StandardCharsets.UTF_8));
+        assertFalse(promiseCheckerHandler.isCalled.get());
+        flushOperation.getListener().accept(null, null);
+        assertTrue(promiseCheckerHandler.isCalled.get());
+    }
+
+    public void testCloseListener() {
+        AtomicBoolean listenerCalled = new AtomicBoolean(false);
+        CloseChannelHandler handler = new CloseChannelHandler();
+        NettyAdaptor nettyAdaptor = new NettyAdaptor(handler);
+        byte[] bytes = "SHOULD_CLOSE".getBytes(StandardCharsets.UTF_8);
+        ByteBuffer[] buffers = {ByteBuffer.wrap(bytes)};
+        nettyAdaptor.addCloseListener((v, e) -> listenerCalled.set(true));
+        assertFalse(listenerCalled.get());
+        nettyAdaptor.read(buffers);
+        assertTrue(listenerCalled.get());
+    }
+
+    private class TenIntsToStringsHandler extends SimpleChannelInboundHandler<ByteBuf> {
+
+        private String result;
+        boolean wasCalled = false;
+
+        @Override
+        protected void channelRead0(ChannelHandlerContext ctx, ByteBuf msg) throws Exception {
+            wasCalled = true;
+            if (msg.readableBytes() < 10 * 4) {
+                throw new IllegalStateException("Must have ten ints");
+            }
+            StringBuilder builder = new StringBuilder();
+            for (int i = 0; i < 10; ++i) {
+                builder.append(msg.readInt());
+            }
+            result = builder.toString();
+        }
+    }
+
+    private class WriteInMiddleHandler extends ChannelInboundHandlerAdapter {
+
+        @Override
+        public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
+            ByteBuf buffer = (ByteBuf) msg;
+            String bufferString = buffer.toString(StandardCharsets.UTF_8);
+            if (bufferString.equals("SHOULD_WRITE")) {
+                ctx.writeAndFlush("Failed");
+            } else {
+                throw new IllegalArgumentException("Only accept SHOULD_WRITE message");
+            }
+        }
+    }
+
+    private class CapitalizeWriteHandler extends ChannelOutboundHandlerAdapter {
+
+        @Override
+        public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
+            String string = (String) msg;
+            assert string.equals("Failed") : "Should be the same as what we wrote.";
+            super.write(ctx, Unpooled.wrappedBuffer(string.toUpperCase(Locale.ROOT).getBytes(StandardCharsets.UTF_8)), promise);
+        }
+    }
+
+    private class PromiseCheckerHandler extends ChannelOutboundHandlerAdapter {
+
+        private AtomicBoolean isCalled = new AtomicBoolean(false);
+
+        @Override
+        public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
+            promise.addListener((f) -> isCalled.set(true));
+            super.write(ctx, msg, promise);
+        }
+    }
+
+    private class CloseChannelHandler extends ChannelInboundHandlerAdapter {
+
+        @Override
+        public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
+            ByteBuf buffer = (ByteBuf) msg;
+            String bufferString = buffer.toString(StandardCharsets.UTF_8);
+            if (bufferString.equals("SHOULD_CLOSE")) {
+                ctx.close();
+            } else {
+                throw new IllegalArgumentException("Only accept SHOULD_CLOSE message");
+            }
+        }
+    }
+}
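Taken together, these tests pin down the NettyAdaptor contract: read(ByteBuffer[]) returns the number of bytes the embedded pipeline consumed, and writes issued by handlers are never sent directly but are queued and surfaced as FlushOperations. A rough sketch of that interaction, reusing the fixture handlers above (buffer contents illustrative):

    NettyAdaptor adaptor = new NettyAdaptor(new CapitalizeWriteHandler(), new WriteInMiddleHandler());
    ByteBuffer[] incoming = {ByteBuffer.wrap("SHOULD_WRITE".getBytes(StandardCharsets.UTF_8))};
    adaptor.read(incoming);                                 // drives the inbound half of the pipeline
    FlushOperation flush = adaptor.pollOutboundOperation(); // the handler's write surfaces here
    // flush.getBuffersToWrite() now holds "FAILED"; completing the operation
    // fires the ChannelPromise listeners registered during the write:
    flush.getListener().accept(null, null);

diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java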
b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java new file mode 100644 index 0000000000000..4741bd69a527a --- /dev/null +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java @@ -0,0 +1,353 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.nio; + +import io.netty.buffer.ByteBufUtil; +import io.netty.buffer.Unpooled; +import io.netty.handler.codec.TooLongFrameException; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpHeaderValues; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.HttpUtil; +import io.netty.handler.codec.http.HttpVersion; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.common.util.MockPageCacheRecycler; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.http.BindHttpException; +import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.http.HttpTransportSettings; +import org.elasticsearch.http.NullDispatcher; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; +import java.util.Collections; +import java.util.concurrent.atomic.AtomicReference; + +import static org.elasticsearch.rest.RestStatus.BAD_REQUEST; +import static org.elasticsearch.rest.RestStatus.OK; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; + +/** + * Tests for the {@link NioHttpServerTransport} class. 
+ */
+public class NioHttpServerTransportTests extends ESTestCase {
+
+    private NetworkService networkService;
+    private ThreadPool threadPool;
+    private MockBigArrays bigArrays;
+
+    @Before
+    public void setup() throws Exception {
+        networkService = new NetworkService(Collections.emptyList());
+        threadPool = new TestThreadPool("test");
+        bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
+    }
+
+    @After
+    public void shutdown() throws Exception {
+        if (threadPool != null) {
+            threadPool.shutdownNow();
+        }
+        threadPool = null;
+        networkService = null;
+        bigArrays = null;
+    }
+
+//    public void testCorsConfig() {
+//        final Set<String> methods = new HashSet<>(Arrays.asList("get", "options", "post"));
+//        final Set<String> headers = new HashSet<>(Arrays.asList("Content-Type", "Content-Length"));
+//        final String prefix = randomBoolean() ? " " : ""; // sometimes have a leading whitespace between comma delimited elements
+//        final Settings settings = Settings.builder()
+//            .put(SETTING_CORS_ENABLED.getKey(), true)
+//            .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), "*")
+//            .put(SETTING_CORS_ALLOW_METHODS.getKey(), collectionToDelimitedString(methods, ",", prefix, ""))
+//            .put(SETTING_CORS_ALLOW_HEADERS.getKey(), collectionToDelimitedString(headers, ",", prefix, ""))
+//            .put(SETTING_CORS_ALLOW_CREDENTIALS.getKey(), true)
+//            .build();
+//        final Netty4CorsConfig corsConfig = Netty4HttpServerTransport.buildCorsConfig(settings);
+//        assertTrue(corsConfig.isAnyOriginSupported());
+//        assertEquals(headers, corsConfig.allowedRequestHeaders());
+//        assertEquals(methods, corsConfig.allowedRequestMethods().stream().map(HttpMethod::name).collect(Collectors.toSet()));
+//    }
+
+//    public void testCorsConfigWithDefaults() {
+//        final Set<String> methods = Strings.commaDelimitedListToSet(SETTING_CORS_ALLOW_METHODS.getDefault(Settings.EMPTY));
+//        final Set<String> headers = Strings.commaDelimitedListToSet(SETTING_CORS_ALLOW_HEADERS.getDefault(Settings.EMPTY));
+//        final long maxAge = SETTING_CORS_MAX_AGE.getDefault(Settings.EMPTY);
+//        final Settings settings = Settings.builder().put(SETTING_CORS_ENABLED.getKey(), true).build();
+//        final Netty4CorsConfig corsConfig = Netty4HttpServerTransport.buildCorsConfig(settings);
+//        assertFalse(corsConfig.isAnyOriginSupported());
+//        assertEquals(Collections.emptySet(), corsConfig.origins().get());
+//        assertEquals(headers, corsConfig.allowedRequestHeaders());
+//        assertEquals(methods, corsConfig.allowedRequestMethods().stream().map(HttpMethod::name).collect(Collectors.toSet()));
+//        assertEquals(maxAge, corsConfig.maxAge());
+//        assertFalse(corsConfig.isCredentialsAllowed());
+//    }
+
+    /**
+     * Test that {@link NioHttpServerTransport} supports the "Expect: 100-continue" HTTP header
+     * @throws InterruptedException if the client communication with the server is interrupted
+     */
+    public void testExpectContinueHeader() throws InterruptedException {
+        final Settings settings = Settings.EMPTY;
+        final int contentLength = randomIntBetween(1, HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.get(settings).bytesAsInt());
+        runExpectHeaderTest(settings, HttpHeaderValues.CONTINUE.toString(), contentLength, HttpResponseStatus.CONTINUE);
+    }
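The handshake this test drives is easiest to see at the wire level; schematically (not captured output), a successful exchange looks like:

    C: POST / HTTP/1.1
    C: Expect: 100-continue
    C: Content-Length: <n>
    S: HTTP/1.1 100 Continue          (the server is willing to read the body)
    C: <n bytes of body>
    S: HTTP/1.1 200 OK                ("done" from the test dispatcher)

The two tests that follow cover the failure paths: a declared Content-Length over the configured maximum is rejected before the body is sent with 413 Request Entity Too Large, and any expectation other than 100-continue gets 417 Expectation Failed.

+
+    /**
+     * Test that {@link NioHttpServerTransport} responds to a
+     * 100-continue expectation with too large a content-length
+     * with a 413 status.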
+ * @throws InterruptedException if the client communication with the server is interrupted + */ + public void testExpectContinueHeaderContentLengthTooLong() throws InterruptedException { + final String key = HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.getKey(); + final int maxContentLength = randomIntBetween(1, 104857600); + final Settings settings = Settings.builder().put(key, maxContentLength + "b").build(); + final int contentLength = randomIntBetween(maxContentLength + 1, Integer.MAX_VALUE); + runExpectHeaderTest( + settings, HttpHeaderValues.CONTINUE.toString(), contentLength, HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE); + } + + /** + * Test that {@link NioHttpServerTransport} responds to an unsupported expectation with a 417 status. + * @throws InterruptedException if the client communication with the server is interrupted + */ + public void testExpectUnsupportedExpectation() throws InterruptedException { + runExpectHeaderTest(Settings.EMPTY, "chocolate=yummy", 0, HttpResponseStatus.EXPECTATION_FAILED); + } + + private void runExpectHeaderTest( + final Settings settings, + final String expectation, + final int contentLength, + final HttpResponseStatus expectedStatus) throws InterruptedException { + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + @Override + public void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext threadContext) { + channel.sendResponse(new BytesRestResponse(OK, BytesRestResponse.TEXT_CONTENT_TYPE, new BytesArray("done"))); + } + + @Override + public void dispatchBadRequest(RestRequest request, RestChannel channel, ThreadContext threadContext, Throwable cause) { + throw new AssertionError(); + } + }; + try (NioHttpServerTransport transport = new NioHttpServerTransport(settings, networkService, bigArrays, threadPool, + xContentRegistry(), dispatcher)) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + try (Netty4HttpClient client = new Netty4HttpClient()) { + final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/"); + request.headers().set(HttpHeaderNames.EXPECT, expectation); + HttpUtil.setContentLength(request, contentLength); + + final FullHttpResponse response = client.post(remoteAddress.address(), request); + assertThat(response.status(), equalTo(expectedStatus)); + if (expectedStatus.equals(HttpResponseStatus.CONTINUE)) { + final FullHttpRequest continuationRequest = + new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/", Unpooled.EMPTY_BUFFER); + final FullHttpResponse continuationResponse = client.post(remoteAddress.address(), continuationRequest); + + assertThat(continuationResponse.status(), is(HttpResponseStatus.OK)); + assertThat(new String(ByteBufUtil.getBytes(continuationResponse.content()), StandardCharsets.UTF_8), is("done")); + } + } + } + } + + public void testBindUnavailableAddress() { + try (NioHttpServerTransport transport = new NioHttpServerTransport(Settings.EMPTY, networkService, bigArrays, threadPool, + xContentRegistry(), new NullDispatcher())) { + transport.start(); + TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + Settings settings = Settings.builder().put("http.port", remoteAddress.getPort()).build(); + try (NioHttpServerTransport otherTransport = new NioHttpServerTransport(settings, networkService, bigArrays, threadPool, + xContentRegistry(), new NullDispatcher())) { + BindHttpException 
bindHttpException = expectThrows(BindHttpException.class, () -> otherTransport.start()); + assertEquals("Failed to bind to [" + remoteAddress.getPort() + "]", bindHttpException.getMessage()); + } + } + } + + public void testBadRequest() throws InterruptedException { + final AtomicReference causeReference = new AtomicReference<>(); + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + + @Override + public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) { + throw new AssertionError(); + } + + @Override + public void dispatchBadRequest(final RestRequest request, + final RestChannel channel, + final ThreadContext threadContext, + final Throwable cause) { + causeReference.set(cause); + try { + final ElasticsearchException e = new ElasticsearchException("you sent a bad request and you should feel bad"); + channel.sendResponse(new BytesRestResponse(channel, BAD_REQUEST, e)); + } catch (final IOException e) { + throw new AssertionError(e); + } + } + + }; + + final Settings settings; + final int maxInitialLineLength; + final Setting httpMaxInitialLineLengthSetting = HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH; + if (randomBoolean()) { + maxInitialLineLength = httpMaxInitialLineLengthSetting.getDefault(Settings.EMPTY).bytesAsInt(); + settings = Settings.EMPTY; + } else { + maxInitialLineLength = randomIntBetween(1, 8192); + settings = Settings.builder().put(httpMaxInitialLineLengthSetting.getKey(), maxInitialLineLength + "b").build(); + } + + try (NioHttpServerTransport transport = + new NioHttpServerTransport(settings, networkService, bigArrays, threadPool, xContentRegistry(), dispatcher)) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + + try (Netty4HttpClient client = new Netty4HttpClient()) { + final String url = "/" + new String(new byte[maxInitialLineLength], Charset.forName("UTF-8")); + final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, url); + + final FullHttpResponse response = client.post(remoteAddress.address(), request); + assertThat(response.status(), equalTo(HttpResponseStatus.BAD_REQUEST)); + assertThat( + new String(response.content().array(), Charset.forName("UTF-8")), + containsString("you sent a bad request and you should feel bad")); + } + } + + assertNotNull(causeReference.get()); + assertThat(causeReference.get(), instanceOf(TooLongFrameException.class)); + } + + public void testDispatchDoesNotModifyThreadContext() throws InterruptedException { + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + + @Override + public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) { + threadContext.putHeader("foo", "bar"); + threadContext.putTransient("bar", "baz"); + } + + @Override + public void dispatchBadRequest(final RestRequest request, + final RestChannel channel, + final ThreadContext threadContext, + final Throwable cause) { + threadContext.putHeader("foo_bad", "bar"); + threadContext.putTransient("bar_bad", "baz"); + } + + }; + + try (NioHttpServerTransport transport = + new NioHttpServerTransport(Settings.EMPTY, networkService, bigArrays, threadPool, xContentRegistry(), dispatcher)) { + transport.start(); + + transport.dispatchRequest(null, null); + assertNull(threadPool.getThreadContext().getHeader("foo")); + 
assertNull(threadPool.getThreadContext().getTransient("bar")); + + transport.dispatchBadRequest(null, null, null); + assertNull(threadPool.getThreadContext().getHeader("foo_bad")); + assertNull(threadPool.getThreadContext().getTransient("bar_bad")); + } + } + +// public void testReadTimeout() throws Exception { +// final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { +// +// @Override +// public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) { +// throw new AssertionError("Should not have received a dispatched request"); +// } +// +// @Override +// public void dispatchBadRequest(final RestRequest request, +// final RestChannel channel, +// final ThreadContext threadContext, +// final Throwable cause) { +// throw new AssertionError("Should not have received a dispatched request"); +// } +// +// }; +// +// Settings settings = Settings.builder() +// .put(HttpTransportSettings.SETTING_HTTP_READ_TIMEOUT.getKey(), new TimeValue(randomIntBetween(100, 300))) +// .build(); +// +// +// NioEventLoopGroup group = new NioEventLoopGroup(); +// try (NioHttpServerTransport transport = +// new NioHttpServerTransport(settings, networkService, bigArrays, threadPool, xContentRegistry(), dispatcher)) { +// transport.start(); +// final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); +// +// AtomicBoolean channelClosed = new AtomicBoolean(false); +// +// Bootstrap clientBootstrap = new Bootstrap().channel(NioSocketChannel.class).handler(new ChannelInitializer() { +// +// @Override +// protected void initChannel(SocketChannel ch) { +// ch.pipeline().addLast(new ChannelHandlerAdapter() {}); +// +// } +// }).group(group); +// ChannelFuture connect = clientBootstrap.connect(remoteAddress.address()); +// connect.channel().closeFuture().addListener(future -> channelClosed.set(true)); +// +// assertBusy(() -> assertTrue("Channel should be closed due to read timeout", channelClosed.get()), 5, TimeUnit.SECONDS); +// +// } finally { +// group.shutdownGracefully().await(); +// } +// } +} diff --git a/qa/build.gradle b/qa/build.gradle index 494f6e3cd94b7..709c309359ecf 100644 --- a/qa/build.gradle +++ b/qa/build.gradle @@ -4,7 +4,7 @@ import org.elasticsearch.gradle.test.RestIntegTestTask subprojects { Project subproj -> subproj.tasks.withType(RestIntegTestTask) { subproj.extensions.configure("${it.name}Cluster") { cluster -> - cluster.distribution = 'oss-zip' + cluster.distribution = System.getProperty('tests.distribution', 'oss-zip') } } } diff --git a/qa/ccs-unavailable-clusters/src/test/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java b/qa/ccs-unavailable-clusters/src/test/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java index c373adb5d743d..f7b87905b24d5 100644 --- a/qa/ccs-unavailable-clusters/src/test/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java +++ b/qa/ccs-unavailable-clusters/src/test/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java @@ -35,6 +35,7 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchScrollRequest; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; @@ -134,7 +135,7 @@ public void testSearchSkipUnavailable() throws IOException { for 
(int i = 0; i < 10; i++) { restHighLevelClient.index(new IndexRequest("index", "doc", String.valueOf(i)).source("field", "value")); } - Response refreshResponse = client().performRequest("POST", "/index/_refresh"); + Response refreshResponse = client().performRequest(new Request("POST", "/index/_refresh")); assertEquals(200, refreshResponse.getStatusLine().getStatusCode()); { @@ -223,10 +224,11 @@ public void testSkipUnavailableDependsOnSeeds() throws IOException { { //check that skip_unavailable alone cannot be set - HttpEntity clusterSettingsEntity = buildUpdateSettingsRequestBody( - Collections.singletonMap("skip_unavailable", randomBoolean())); + Request request = new Request("PUT", "/_cluster/settings"); + request.setEntity(buildUpdateSettingsRequestBody( + Collections.singletonMap("skip_unavailable", randomBoolean()))); ResponseException responseException = expectThrows(ResponseException.class, - () -> client().performRequest("PUT", "/_cluster/settings", Collections.emptyMap(), clusterSettingsEntity)); + () -> client().performRequest(request)); assertEquals(400, responseException.getResponse().getStatusLine().getStatusCode()); assertThat(responseException.getMessage(), containsString("Missing required setting [search.remote.remote1.seeds] " + @@ -240,9 +242,10 @@ public void testSkipUnavailableDependsOnSeeds() throws IOException { { //check that seeds cannot be reset alone if skip_unavailable is set - HttpEntity clusterSettingsEntity = buildUpdateSettingsRequestBody(Collections.singletonMap("seeds", null)); + Request request = new Request("PUT", "/_cluster/settings"); + request.setEntity(buildUpdateSettingsRequestBody(Collections.singletonMap("seeds", null))); ResponseException responseException = expectThrows(ResponseException.class, - () -> client().performRequest("PUT", "/_cluster/settings", Collections.emptyMap(), clusterSettingsEntity)); + () -> client().performRequest(request)); assertEquals(400, responseException.getResponse().getStatusLine().getStatusCode()); assertThat(responseException.getMessage(), containsString("Missing required setting [search.remote.remote1.seeds] " + "for setting [search.remote.remote1.skip_unavailable]")); @@ -284,8 +287,9 @@ private static void assertSearchConnectFailure() { private static void updateRemoteClusterSettings(Map settings) throws IOException { - HttpEntity clusterSettingsEntity = buildUpdateSettingsRequestBody(settings); - Response response = client().performRequest("PUT", "/_cluster/settings", Collections.emptyMap(), clusterSettingsEntity); + Request request = new Request("PUT", "/_cluster/settings"); + request.setEntity(buildUpdateSettingsRequestBody(settings)); + Response response = client().performRequest(request); assertEquals(200, response.getStatusLine().getStatusCode()); } diff --git a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java index 4e69a478562a7..992d3ce71f623 100644 --- a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java +++ b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java @@ -21,6 +21,7 @@ import org.apache.http.ConnectionClosedException; import org.apache.lucene.util.Constants; +import org.elasticsearch.client.Request; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.test.rest.ESRestTestCase; import org.hamcrest.Matcher; @@ -51,7 +52,8 @@ public void 
testDieWithDignity() throws Exception { assertThat(pidFileLines, hasSize(1)); final int pid = Integer.parseInt(pidFileLines.get(0)); Files.delete(pidFile); - IOException e = expectThrows(IOException.class, () -> client().performRequest("GET", "/_die_with_dignity")); + IOException e = expectThrows(IOException.class, + () -> client().performRequest(new Request("GET", "/_die_with_dignity"))); Matcher failureMatcher = instanceOf(ConnectionClosedException.class); if (Constants.WINDOWS) { /* diff --git a/qa/mixed-cluster/build.gradle b/qa/mixed-cluster/build.gradle index d335ac982fd8a..da99bbb4c8036 100644 --- a/qa/mixed-cluster/build.gradle +++ b/qa/mixed-cluster/build.gradle @@ -54,9 +54,16 @@ for (Version version : bwcVersions.wireCompatible) { bwcTest.dependsOn(versionBwcTest) } - /* To support taking index snapshots, we have to set path.repo setting */ tasks.getByName("${baseName}#mixedClusterTestRunner").configure { + /* To support taking index snapshots, we have to set path.repo setting */ systemProperty 'tests.path.repo', new File(buildDir, "cluster/shared/repo") + if ('zip'.equals(extension.distribution)) { + systemProperty 'tests.rest.blacklist', [ + 'cat.templates/10_basic/No templates', + 'cat.templates/10_basic/Sort templates', + 'cat.templates/10_basic/Multiple template', + ].join(',') + } } } diff --git a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java index a38ff284a1a05..22a3fa65eece0 100644 --- a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java +++ b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java @@ -19,9 +19,8 @@ package org.elasticsearch.backwards; import org.apache.http.HttpHost; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; import org.elasticsearch.Version; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestClient; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -34,25 +33,21 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.stream.Collectors; -import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiOfLength; -import static java.util.Collections.emptyMap; -import static java.util.Collections.singletonMap; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.not; public class IndexingIT extends ESRestTestCase { private int indexDocs(String index, final int idStart, final int numDocs) throws IOException { for (int i = 0; i < numDocs; i++) { final int id = idStart + i; - assertOK(client().performRequest("PUT", index + "/test/" + id, emptyMap(), - new StringEntity("{\"test\": \"test_" + randomAsciiOfLength(2) + "\"}", ContentType.APPLICATION_JSON))); + Request request = new Request("PUT", index + "/test/" + id); + request.setJsonEntity("{\"test\": \"test_" + randomAlphaOfLength(2) + "\"}"); + assertOK(client().performRequest(request)); } return numDocs; } @@ -105,7 +100,7 @@ public void testIndexVersionPropagation() throws Exception { logger.info("allowing shards on all nodes"); updateIndexSettings(index, Settings.builder().putNull("index.routing.allocation.include._name")); ensureGreen(index); - assertOK(client().performRequest("POST", index + "/_refresh")); + assertOK(client().performRequest(new Request("POST", index + 
"/_refresh"))); List shards = buildShards(index, nodes, newNodeClient); Shard primary = buildShards(index, nodes, newNodeClient).stream().filter(Shard::isPrimary).findFirst().get(); logger.info("primary resolved to: " + primary.getNode().getNodeName()); @@ -117,7 +112,7 @@ public void testIndexVersionPropagation() throws Exception { nUpdates = randomIntBetween(minUpdates, maxUpdates); logger.info("indexing docs with [{}] concurrent updates after allowing shards on all nodes", nUpdates); final int finalVersionForDoc2 = indexDocWithConcurrentUpdates(index, 2, nUpdates); - assertOK(client().performRequest("POST", index + "/_refresh")); + assertOK(client().performRequest(new Request("POST", index + "/_refresh"))); shards = buildShards(index, nodes, newNodeClient); primary = shards.stream().filter(Shard::isPrimary).findFirst().get(); logger.info("primary resolved to: " + primary.getNode().getNodeName()); @@ -133,7 +128,7 @@ public void testIndexVersionPropagation() throws Exception { nUpdates = randomIntBetween(minUpdates, maxUpdates); logger.info("indexing docs with [{}] concurrent updates after moving primary", nUpdates); final int finalVersionForDoc3 = indexDocWithConcurrentUpdates(index, 3, nUpdates); - assertOK(client().performRequest("POST", index + "/_refresh")); + assertOK(client().performRequest(new Request("POST", index + "/_refresh"))); shards = buildShards(index, nodes, newNodeClient); for (Shard shard : shards) { assertVersion(index, 3, "_only_nodes:" + shard.getNode().getNodeName(), finalVersionForDoc3); @@ -146,7 +141,7 @@ public void testIndexVersionPropagation() throws Exception { nUpdates = randomIntBetween(minUpdates, maxUpdates); logger.info("indexing doc with [{}] concurrent updates after setting number of replicas to 0", nUpdates); final int finalVersionForDoc4 = indexDocWithConcurrentUpdates(index, 4, nUpdates); - assertOK(client().performRequest("POST", index + "/_refresh")); + assertOK(client().performRequest(new Request("POST", index + "/_refresh"))); shards = buildShards(index, nodes, newNodeClient); for (Shard shard : shards) { assertVersion(index, 4, "_only_nodes:" + shard.getNode().getNodeName(), finalVersionForDoc4); @@ -159,7 +154,7 @@ public void testIndexVersionPropagation() throws Exception { nUpdates = randomIntBetween(minUpdates, maxUpdates); logger.info("indexing doc with [{}] concurrent updates after setting number of replicas to 1", nUpdates); final int finalVersionForDoc5 = indexDocWithConcurrentUpdates(index, 5, nUpdates); - assertOK(client().performRequest("POST", index + "/_refresh")); + assertOK(client().performRequest(new Request("POST", index + "/_refresh"))); shards = buildShards(index, nodes, newNodeClient); for (Shard shard : shards) { assertVersion(index, 5, "_only_nodes:" + shard.getNode().getNodeName(), finalVersionForDoc5); @@ -191,7 +186,7 @@ public void testSeqNoCheckpoints() throws Exception { logger.info("allowing shards on all nodes"); updateIndexSettings(index, Settings.builder().putNull("index.routing.allocation.include._name")); ensureGreen(index); - assertOK(client().performRequest("POST", index + "/_refresh")); + assertOK(client().performRequest(new Request("POST", index + "/_refresh"))); for (final String bwcName : bwcNamesList) { assertCount(index, "_only_nodes:" + bwcName, numDocs); } @@ -222,7 +217,7 @@ public void testSeqNoCheckpoints() throws Exception { logger.info("setting number of replicas to 1"); updateIndexSettings(index, Settings.builder().put("index.number_of_replicas", 1)); ensureGreen(index); - 
assertOK(client().performRequest("POST", index + "/_refresh")); + assertOK(client().performRequest(new Request("POST", index + "/_refresh"))); for (Shard shard : buildShards(index, nodes, newNodeClient)) { assertCount(index, "_only_nodes:" + shard.node.nodeName, numDocs); @@ -237,20 +232,18 @@ public void testUpdateSnapshotStatus() throws Exception { logger.info("cluster discovered: {}", nodes.toString()); // Create the repository before taking the snapshot. - String repoConfig = Strings + Request request = new Request("PUT", "/_snapshot/repo"); + request.setJsonEntity(Strings .toString(JsonXContent.contentBuilder() .startObject() - .field("type", "fs") - .startObject("settings") - .field("compress", randomBoolean()) - .field("location", System.getProperty("tests.path.repo")) - .endObject() - .endObject()); - - assertOK( - client().performRequest("PUT", "/_snapshot/repo", emptyMap(), - new StringEntity(repoConfig, ContentType.APPLICATION_JSON)) - ); + .field("type", "fs") + .startObject("settings") + .field("compress", randomBoolean()) + .field("location", System.getProperty("tests.path.repo")) + .endObject() + .endObject())); + + assertOK(client().performRequest(request)); String bwcNames = nodes.getBWCNodes().stream().map(Node::getNodeName).collect(Collectors.joining(",")); @@ -264,34 +257,36 @@ public void testUpdateSnapshotStatus() throws Exception { createIndex(index, settings.build()); indexDocs(index, 0, between(50, 100)); ensureGreen(index); - assertOK(client().performRequest("POST", index + "/_refresh")); + assertOK(client().performRequest(new Request("POST", index + "/_refresh"))); - assertOK( - client().performRequest("PUT", "/_snapshot/repo/bwc-snapshot", singletonMap("wait_for_completion", "true"), - new StringEntity("{\"indices\": \"" + index + "\"}", ContentType.APPLICATION_JSON)) - ); + request = new Request("PUT", "/_snapshot/repo/bwc-snapshot"); + request.addParameter("wait_for_completion", "true"); + request.setJsonEntity("{\"indices\": \"" + index + "\"}"); + assertOK(client().performRequest(request)); // Allocating shards on all nodes, taking snapshots should happen on all nodes. 
         updateIndexSettings(index, Settings.builder().putNull("index.routing.allocation.include._name"));
         ensureGreen(index);
-        assertOK(client().performRequest("POST", index + "/_refresh"));
+        assertOK(client().performRequest(new Request("POST", index + "/_refresh")));
 
-        assertOK(
-            client().performRequest("PUT", "/_snapshot/repo/mixed-snapshot", singletonMap("wait_for_completion", "true"),
-                new StringEntity("{\"indices\": \"" + index + "\"}", ContentType.APPLICATION_JSON))
-        );
+        request = new Request("PUT", "/_snapshot/repo/mixed-snapshot");
+        request.addParameter("wait_for_completion", "true");
+        request.setJsonEntity("{\"indices\": \"" + index + "\"}");
+        assertOK(client().performRequest(request));
     }
 
     private void assertCount(final String index, final String preference, final int expectedCount) throws IOException {
-        final Response response = client().performRequest("GET", index + "/_count", Collections.singletonMap("preference", preference));
+        Request request = new Request("GET", index + "/_count");
+        request.addParameter("preference", preference);
+        final Response response = client().performRequest(request);
         assertOK(response);
         final int actualCount = Integer.parseInt(ObjectPath.createFromResponse(response).evaluate("count").toString());
         assertThat(actualCount, equalTo(expectedCount));
     }
 
     private void assertVersion(final String index, final int docId, final String preference, final int expectedVersion) throws IOException {
-        final Response response = client().performRequest("GET", index + "/test/" + docId,
-            Collections.singletonMap("preference", preference));
+        Request request = new Request("GET", index + "/test/" + docId);
+        request.addParameter("preference", preference);
+        final Response response = client().performRequest(request);
         assertOK(response);
         final int actualVersion = Integer.parseInt(ObjectPath.createFromResponse(response).evaluate("_version").toString());
         assertThat("version mismatch for doc [" + docId + "] preference [" + preference + "]", actualVersion, equalTo(expectedVersion));
@@ -323,7 +318,9 @@ private void assertSeqNoOnShards(String index, Nodes nodes, int numDocs, RestCli
     }
 
     private List<Shard> buildShards(String index, Nodes nodes, RestClient client) throws IOException {
-        Response response = client.performRequest("GET", index + "/_stats", singletonMap("level", "shards"));
+        Request request = new Request("GET", index + "/_stats");
+        request.addParameter("level", "shards");
+        Response response = client.performRequest(request);
         List<Object> shardStats = ObjectPath.createFromResponse(response).evaluate("indices." + index + ".shards.0");
         ArrayList<Shard> shards = new ArrayList<>();
         for (Object shard : shardStats) {
@@ -341,7 +338,7 @@ private List<Shard> buildShards(String index, Nodes nodes, RestClient client) th
     }
 
     private Nodes buildNodeAndVersions() throws IOException {
-        Response response = client().performRequest("GET", "_nodes");
+        Response response = client().performRequest(new Request("GET", "_nodes"));
         ObjectPath objectPath = ObjectPath.createFromResponse(response);
         Map<String, Object> nodesAsMap = objectPath.evaluate("nodes");
         Nodes nodes = new Nodes();
@@ -352,7 +349,7 @@ private Nodes buildNodeAndVersions() throws IOException {
                 Version.fromString(objectPath.evaluate("nodes." + id + ".version")),
                 HttpHost.create(objectPath.evaluate("nodes."
+ id + ".http.publish_address")))); } - response = client().performRequest("GET", "_cluster/state"); + response = client().performRequest(new Request("GET", "_cluster/state")); nodes.setMasterNodeId(ObjectPath.createFromResponse(response).evaluate("master_node")); return nodes; } diff --git a/qa/query-builder-bwc/src/test/java/org/elasticsearch/bwc/QueryBuilderBWCIT.java b/qa/query-builder-bwc/src/test/java/org/elasticsearch/bwc/QueryBuilderBWCIT.java index f3e03f006c5aa..2d3f55ab94bb4 100644 --- a/qa/query-builder-bwc/src/test/java/org/elasticsearch/bwc/QueryBuilderBWCIT.java +++ b/qa/query-builder-bwc/src/test/java/org/elasticsearch/bwc/QueryBuilderBWCIT.java @@ -19,10 +19,9 @@ package org.elasticsearch.bwc; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; import org.apache.http.util.EntityUtils; import org.elasticsearch.Version; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; @@ -189,13 +188,15 @@ public void testQueryBuilderBWC() throws Exception { mappingsAndSettings.endObject(); } mappingsAndSettings.endObject(); - Response rsp = client().performRequest("PUT", "/" + index, Collections.emptyMap(), - new StringEntity(Strings.toString(mappingsAndSettings), ContentType.APPLICATION_JSON)); + Request request = new Request("PUT", "/" + index); + request.setJsonEntity(Strings.toString(mappingsAndSettings)); + Response rsp = client().performRequest(request); assertEquals(200, rsp.getStatusLine().getStatusCode()); for (int i = 0; i < CANDIDATES.size(); i++) { - rsp = client().performRequest("PUT", "/" + index + "/doc/" + Integer.toString(i), Collections.emptyMap(), - new StringEntity((String) CANDIDATES.get(i)[0], ContentType.APPLICATION_JSON)); + request = new Request("PUT", "/" + index + "/doc/" + Integer.toString(i)); + request.setJsonEntity((String) CANDIDATES.get(i)[0]); + rsp = client().performRequest(request); assertEquals(201, rsp.getStatusLine().getStatusCode()); } } else { @@ -204,9 +205,10 @@ public void testQueryBuilderBWC() throws Exception { for (int i = 0; i < CANDIDATES.size(); i++) { QueryBuilder expectedQueryBuilder = (QueryBuilder) CANDIDATES.get(i)[1]; - Response rsp = client().performRequest("GET", "/" + index + "/_search", Collections.emptyMap(), - new StringEntity("{\"query\": {\"ids\": {\"values\": [\"" + Integer.toString(i) + "\"]}}, " + - "\"docvalue_fields\" : [\"query.query_builder_field\"]}", ContentType.APPLICATION_JSON)); + Request request = new Request("GET", "/" + index + "/_search"); + request.setJsonEntity("{\"query\": {\"ids\": {\"values\": [\"" + Integer.toString(i) + "\"]}}, " + + "\"docvalue_fields\" : [\"query.query_builder_field\"]}"); + Response rsp = client().performRequest(request); assertEquals(200, rsp.getStatusLine().getStatusCode()); Map hitRsp = (Map) ((List) ((Map)toMap(rsp).get("hits")).get("hits")).get(0); String queryBuilderStr = (String) ((List) ((Map) hitRsp.get("fields")).get("query.query_builder_field")).get(0); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java index 965f94607aebb..bfa856e381b12 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java @@ -31,11 +31,11 @@ import 
org.elasticsearch.action.support.ActionFilter; import org.elasticsearch.action.termvectors.MultiTermVectorsRequest; import org.elasticsearch.client.Client; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentType; @@ -221,8 +221,10 @@ public void testThatMoreLikeThisQueryMultiTermVectorRequestContainsContextAndHea public void testThatRelevantHttpHeadersBecomeRequestHeaders() throws IOException { final String IRRELEVANT_HEADER = "SomeIrrelevantHeader"; - Response response = getRestClient().performRequest("GET", "/" + queryIndex + "/_search", - new BasicHeader(CUSTOM_HEADER, randomHeaderValue), new BasicHeader(IRRELEVANT_HEADER, randomHeaderValue)); + Request request = new Request("GET", "/" + queryIndex + "/_search"); + request.setHeaders(new BasicHeader(CUSTOM_HEADER, randomHeaderValue), + new BasicHeader(IRRELEVANT_HEADER, randomHeaderValue)); + Response response = getRestClient().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); List searchRequests = getRequests(SearchRequest.class); assertThat(searchRequests, hasSize(greaterThan(0))); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsNotSetIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsNotSetIT.java index bdda44c1b7118..4ab64abda453b 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsNotSetIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsNotSetIT.java @@ -20,8 +20,8 @@ package org.elasticsearch.http; import org.apache.http.message.BasicHeader; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; -import org.elasticsearch.test.ESIntegTestCase; import java.io.IOException; @@ -32,15 +32,16 @@ public class CorsNotSetIT extends HttpSmokeTestCase { public void testCorsSettingDefaultBehaviourDoesNotReturnAnything() throws IOException { String corsValue = "http://localhost:9200"; - Response response = getRestClient().performRequest("GET", "/", - new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue)); + Request request = new Request("GET", "/"); + request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue)); + Response response = getRestClient().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), is(200)); assertThat(response.getHeader("Access-Control-Allow-Origin"), nullValue()); assertThat(response.getHeader("Access-Control-Allow-Credentials"), nullValue()); } public void testThatOmittingCorsHeaderDoesNotReturnAnything() throws IOException { - Response response = getRestClient().performRequest("GET", "/"); + Response response = getRestClient().performRequest(new Request("GET", "/")); assertThat(response.getStatusLine().getStatusCode(), is(200)); assertThat(response.getHeader("Access-Control-Allow-Origin"), nullValue()); assertThat(response.getHeader("Access-Control-Allow-Credentials"), nullValue()); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsRegexIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsRegexIT.java index 
47215ae669b31..da48e51b63bbe 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsRegexIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsRegexIT.java @@ -19,9 +19,9 @@ package org.elasticsearch.http; import org.apache.http.message.BasicHeader; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; -import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; @@ -54,21 +54,26 @@ protected Settings nodeSettings(int nodeOrdinal) { public void testThatRegularExpressionWorksOnMatch() throws IOException { String corsValue = "http://localhost:9200"; - Response response = getRestClient().performRequest("GET", "/", - new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue)); + Request request = new Request("GET", "/"); + request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar"), + new BasicHeader("Origin", corsValue)); + Response response = getRestClient().performRequest(request); assertResponseWithOriginheader(response, corsValue); - corsValue = "https://localhost:9200"; - response = getRestClient().performRequest("GET", "/", - new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue)); + corsValue = "https://localhost:9201"; + request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar"), + new BasicHeader("Origin", corsValue)); + response = getRestClient().performRequest(request); assertResponseWithOriginheader(response, corsValue); assertThat(response.getHeader("Access-Control-Allow-Credentials"), is("true")); } public void testThatRegularExpressionReturnsForbiddenOnNonMatch() throws IOException { + Request request = new Request("GET", "/"); + request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar"), + new BasicHeader("Origin", "http://evil-host:9200")); try { - getRestClient().performRequest("GET", "/", new BasicHeader("User-Agent", "Mozilla Bar"), - new BasicHeader("Origin", "http://evil-host:9200")); + getRestClient().performRequest(request); fail("request should have failed"); } catch(ResponseException e) { Response response = e.getResponse(); @@ -79,31 +84,38 @@ public void testThatRegularExpressionReturnsForbiddenOnNonMatch() throws IOExcep } public void testThatSendingNoOriginHeaderReturnsNoAccessControlHeader() throws IOException { - Response response = getRestClient().performRequest("GET", "/", new BasicHeader("User-Agent", "Mozilla Bar")); + Request request = new Request("GET", "/"); + request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar")); + Response response = getRestClient().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), is(200)); assertThat(response.getHeader("Access-Control-Allow-Origin"), nullValue()); } public void testThatRegularExpressionIsNotAppliedWithoutCorrectBrowserOnMatch() throws IOException { - Response response = getRestClient().performRequest("GET", "/"); + Response response = getRestClient().performRequest(new Request("GET", "/")); assertThat(response.getStatusLine().getStatusCode(), is(200)); assertThat(response.getHeader("Access-Control-Allow-Origin"), nullValue()); } public void testThatPreFlightRequestWorksOnMatch() throws IOException { String corsValue = "http://localhost:9200"; - Response response = getRestClient().performRequest("OPTIONS", "/", - new BasicHeader("User-Agent", 
"Mozilla Bar"), new BasicHeader("Origin", corsValue), + Request request = new Request("OPTIONS", "/"); + request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar"), + new BasicHeader("Origin", corsValue), new BasicHeader("Access-Control-Request-Method", "GET")); + Response response = getRestClient().performRequest(request); assertResponseWithOriginheader(response, corsValue); assertNotNull(response.getHeader("Access-Control-Allow-Methods")); } public void testThatPreFlightRequestReturnsNullOnNonMatch() throws IOException { + String corsValue = "http://evil-host:9200"; + Request request = new Request("OPTIONS", "/"); + request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar"), + new BasicHeader("Origin", corsValue), + new BasicHeader("Access-Control-Request-Method", "GET")); try { - getRestClient().performRequest("OPTIONS", "/", new BasicHeader("User-Agent", "Mozilla Bar"), - new BasicHeader("Origin", "http://evil-host:9200"), - new BasicHeader("Access-Control-Request-Method", "GET")); + getRestClient().performRequest(request); fail("request should have failed"); } catch(ResponseException e) { Response response = e.getResponse(); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DeprecationHttpIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DeprecationHttpIT.java index 260041fdbda91..bacf5495ab7ae 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DeprecationHttpIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DeprecationHttpIT.java @@ -22,6 +22,7 @@ import org.apache.http.HttpEntity; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.DeprecationLogger; @@ -106,11 +107,10 @@ public void testUniqueDeprecationResponsesMergedTogether() throws IOException { final String commaSeparatedIndices = Stream.of(indices).collect(Collectors.joining(",")); - final String body = "{\"query\":{\"bool\":{\"filter\":[{\"" + TestDeprecatedQueryBuilder.NAME + "\":{}}]}}}"; - // trigger all index deprecations - Response response = getRestClient().performRequest("GET", "/" + commaSeparatedIndices + "/_search", - Collections.emptyMap(), new StringEntity(body, ContentType.APPLICATION_JSON)); + Request request = new Request("GET", "/" + commaSeparatedIndices + "/_search"); + request.setJsonEntity("{\"query\":{\"bool\":{\"filter\":[{\"" + TestDeprecatedQueryBuilder.NAME + "\":{}}]}}}"); + Response response = getRestClient().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), equalTo(OK.getStatus())); final List deprecatedWarnings = getWarningHeaders(response.getHeaders()); @@ -162,8 +162,9 @@ private void doTestDeprecationWarningsAppearInHeaders() throws IOException { Collections.shuffle(settings, random()); // trigger all deprecations - Response response = getRestClient().performRequest("GET", "/_test_cluster/deprecated_settings", - Collections.emptyMap(), buildSettingsRequest(settings, useDeprecatedField)); + Request request = new Request("GET", "/_test_cluster/deprecated_settings"); + request.setEntity(buildSettingsRequest(settings, useDeprecatedField)); + Response response = getRestClient().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), equalTo(OK.getStatus())); final List deprecatedWarnings = getWarningHeaders(response.getHeaders()); diff --git 
a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DetailedErrorsDisabledIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DetailedErrorsDisabledIT.java index fa71822e79e57..6b2f49c583317 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DetailedErrorsDisabledIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DetailedErrorsDisabledIT.java @@ -20,12 +20,11 @@ package org.elasticsearch.http; import java.io.IOException; -import java.util.Collections; import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; -import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; @@ -49,8 +48,10 @@ protected Settings nodeSettings(int nodeOrdinal) { } public void testThatErrorTraceParamReturns400() throws IOException { + Request request = new Request("DELETE", "/"); + request.addParameter("error_trace", "true"); ResponseException e = expectThrows(ResponseException.class, () -> - getRestClient().performRequest("DELETE", "/", Collections.singletonMap("error_trace", "true"))); + getRestClient().performRequest(request)); Response response = e.getResponse(); assertThat(response.getHeader("Content-Type"), is("application/json; charset=UTF-8")); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DetailedErrorsEnabledIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DetailedErrorsEnabledIT.java index d0b80595a26ee..db37034973cf8 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DetailedErrorsEnabledIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DetailedErrorsEnabledIT.java @@ -20,11 +20,11 @@ package org.elasticsearch.http; import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import java.io.IOException; -import java.util.Collections; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.not; @@ -36,7 +36,9 @@ public class DetailedErrorsEnabledIT extends HttpSmokeTestCase { public void testThatErrorTraceWorksByDefault() throws IOException { try { - getRestClient().performRequest("DELETE", "/", Collections.singletonMap("error_trace", "true")); + Request request = new Request("DELETE", "/"); + request.addParameter("error_trace", "true"); + getRestClient().performRequest(request); fail("request should have failed"); } catch(ResponseException e) { Response response = e.getResponse(); @@ -47,7 +49,7 @@ public void testThatErrorTraceWorksByDefault() throws IOException { } try { - getRestClient().performRequest("DELETE", "/"); + getRestClient().performRequest(new Request("DELETE", "/")); fail("request should have failed"); } catch(ResponseException e) { Response response = e.getResponse(); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpCompressionIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpCompressionIT.java index 20ddd0d230ad4..6af08577393d9 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpCompressionIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpCompressionIT.java @@ -19,41 +19,40 @@ package org.elasticsearch.http; import org.apache.http.HttpHeaders; -import 
diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpCompressionIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpCompressionIT.java index 20ddd0d230ad4..6af08577393d9 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpCompressionIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpCompressionIT.java @@ -19,41 +19,40 @@ package org.elasticsearch.http; import org.apache.http.HttpHeaders; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; import org.apache.http.message.BasicHeader; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; -import org.elasticsearch.client.RestClient; import org.elasticsearch.test.rest.ESRestTestCase; import java.io.IOException; -import java.util.Collections; public class HttpCompressionIT extends ESRestTestCase { private static final String GZIP_ENCODING = "gzip"; - private static final StringEntity SAMPLE_DOCUMENT = new StringEntity("{\n" + + private static final String SAMPLE_DOCUMENT = "{\n" + " \"name\": {\n" + " \"first name\": \"Steve\",\n" + " \"last name\": \"Jobs\"\n" + " }\n" + - "}", ContentType.APPLICATION_JSON); + "}"; public void testCompressesResponseIfRequested() throws IOException { - RestClient client = client(); - Response response = client.performRequest("GET", "/", new BasicHeader(HttpHeaders.ACCEPT_ENCODING, GZIP_ENCODING)); + Request request = new Request("GET", "/"); + request.setHeaders(new BasicHeader(HttpHeaders.ACCEPT_ENCODING, GZIP_ENCODING)); + Response response = client().performRequest(request); assertEquals(200, response.getStatusLine().getStatusCode()); assertEquals(GZIP_ENCODING, response.getHeader(HttpHeaders.CONTENT_ENCODING)); } public void testUncompressedResponseByDefault() throws IOException { - RestClient client = client(); - Response response = client.performRequest("GET", "/"); + Response response = client().performRequest(new Request("GET", "/")); assertEquals(200, response.getStatusLine().getStatusCode()); assertNull(response.getHeader(HttpHeaders.CONTENT_ENCODING)); - response = client.performRequest("POST", "/company/employees/1", Collections.emptyMap(), SAMPLE_DOCUMENT); + Request request = new Request("POST", "/company/employees/1"); + request.setJsonEntity(SAMPLE_DOCUMENT); + response = client().performRequest(request); assertEquals(201, response.getStatusLine().getStatusCode()); assertNull(response.getHeader(HttpHeaders.CONTENT_ENCODING)); } diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/NoHandlerIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/NoHandlerIT.java index 0a2d7ed9b06f2..e1d55afea1b54 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/NoHandlerIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/NoHandlerIT.java @@ -21,6 +21,7 @@ import org.apache.http.message.BasicHeader; import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; @@ -45,10 +46,10 @@ public void testNoHandlerRespectsAcceptHeader() throws IOException { private void runTestNoHandlerRespectsAcceptHeader( final String accept, final String contentType, final String expect) throws IOException { - final ResponseException e = - expectThrows( - ResponseException.class, - () -> getRestClient().performRequest("GET", "/foo/bar/baz/qux/quux", new BasicHeader("Accept", accept))); + Request request = new Request("GET", "/foo/bar/baz/qux/quux"); + request.setHeaders(new BasicHeader("Accept", accept)); + final ResponseException e = expectThrows(ResponseException.class, + () -> getRestClient().performRequest(request)); final Response response = e.getResponse(); assertThat(response.getHeader("Content-Type"), equalTo(contentType));
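HttpCompressionIT and NoHandlerIT exercise the other two Request setters this migration introduces: `setHeaders(...)` replaces the trailing `Header...` varargs, and `setJsonEntity(...)` replaces a hand-built `StringEntity` with `ContentType.APPLICATION_JSON`. A rough sketch combining both, valid for the client API at the time of this change (index path and document copied from the test above):

```java
import org.apache.http.message.BasicHeader;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class RequestBodyAndHeadersExample {
    public static Response indexWithGzipAccepted(RestClient client) throws Exception {
        Request request = new Request("POST", "/company/employees/1");
        // setHeaders replaces the Header... varargs on the old performRequest overloads
        request.setHeaders(new BasicHeader("Accept-Encoding", "gzip"));
        // setJsonEntity sets the body and the application/json content type in one call
        request.setJsonEntity("{\"name\":{\"first name\":\"Steve\",\"last name\":\"Jobs\"}}");
        return client.performRequest(request);
    }
}
```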
diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ResponseHeaderPluginIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ResponseHeaderPluginIT.java index 7d413cca97703..b4dbc50d52db7 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ResponseHeaderPluginIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ResponseHeaderPluginIT.java @@ -19,9 +19,9 @@ package org.elasticsearch.http; import org.apache.http.message.BasicHeader; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; @@ -53,7 +53,7 @@ protected Collection<Class<? extends Plugin>> nodePlugins() { public void testThatSettingHeadersWorks() throws IOException { ensureGreen(); try { - getRestClient().performRequest("GET", "/_protected"); + getRestClient().performRequest(new Request("GET", "/_protected")); fail("request should have failed"); } catch(ResponseException e) { Response response = e.getResponse(); @@ -61,7 +61,9 @@ public void testThatSettingHeadersWorks() throws IOException { assertThat(response.getHeader("Secret"), equalTo("required")); } - Response authResponse = getRestClient().performRequest("GET", "/_protected", new BasicHeader("Secret", "password")); + Request request = new Request("GET", "/_protected"); + request.setHeaders(new BasicHeader("Secret", "password")); + Response authResponse = getRestClient().performRequest(request); assertThat(authResponse.getStatusLine().getStatusCode(), equalTo(200)); assertThat(authResponse.getHeader("Secret"), equalTo("granted")); } diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/RestHttpResponseHeadersIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/RestHttpResponseHeadersIT.java index c9e7dc451a053..901bffc9553d4 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/RestHttpResponseHeadersIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/RestHttpResponseHeadersIT.java @@ -18,6 +18,7 @@ package org.elasticsearch.http; import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.test.rest.ESRestTestCase; @@ -46,7 +47,7 @@ public class RestHttpResponseHeadersIT extends ESRestTestCase { * - Options). */ public void testValidEndpointOptionsResponseHttpHeader() throws Exception { - Response response = client().performRequest("OPTIONS", "/_tasks"); + Response response = client().performRequest(new Request("OPTIONS", "/_tasks")); assertThat(response.getStatusLine().getStatusCode(), is(200)); assertThat(response.getHeader("Allow"), notNullValue()); List<String> responseAllowHeaderStringArray = @@ -64,7 +65,7 @@ */ public void testUnsupportedMethodResponseHttpHeader() throws Exception { try { - client().performRequest("DELETE", "/_tasks"); + client().performRequest(new Request("DELETE", "/_tasks")); fail("Request should have failed with 405 error"); } catch (ResponseException e) { Response response = e.getResponse(); @@ -85,9 +86,9 @@ public void testUnsupportedMethodResponseHttpHeader() throws Exception { * 17853 for more information). */ public void testIndexSettingsPostRequest() throws Exception { - client().performRequest("PUT", "/testindex"); + client().performRequest(new Request("PUT", "/testindex")); try { - client().performRequest("POST", "/testindex/_settings"); + client().performRequest(new Request("POST", "/testindex/_settings")); fail("Request should have failed with 405 error"); } catch (ResponseException e) { Response response = e.getResponse();
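The OPTIONS test above only asserts that the `Allow` header is present; the header value is a comma-separated list of HTTP verbs, which is presumably what the truncated `List<String> responseAllowHeaderStringArray` assignment goes on to split. A hypothetical helper showing that follow-up step:

```java
import java.util.Arrays;
import java.util.List;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class AllowHeaderExample {
    // Hypothetical continuation of testValidEndpointOptionsResponseHttpHeader:
    // split the comma-separated Allow header so individual verbs can be asserted.
    public static List<String> allowedMethods(RestClient client) throws Exception {
        Response response = client.performRequest(new Request("OPTIONS", "/_tasks"));
        String allowHeader = response.getHeader("Allow"); // e.g. "GET,HEAD,POST", depending on the handler
        return Arrays.asList(allowHeader.split(","));
    }
}
```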
diff --git a/qa/smoke-test-multinode/build.gradle b/qa/smoke-test-multinode/build.gradle index 5df77bd0d9513..9d299e16f0210 100644 --- a/qa/smoke-test-multinode/build.gradle +++ b/qa/smoke-test-multinode/build.gradle @@ -27,3 +27,13 @@ integTest { integTestCluster { numNodes = 2 } + +integTestRunner { + if ('zip'.equals(integTestCluster.distribution)) { + systemProperty 'tests.rest.blacklist', [ + 'cat.templates/10_basic/No templates', + 'cat.templates/10_basic/Sort templates', + 'cat.templates/10_basic/Multiple template', + ].join(',') + } +} diff --git a/qa/smoke-test-rank-eval-with-mustache/build.gradle b/qa/smoke-test-rank-eval-with-mustache/build.gradle index 7274e65f4e1bd..122c2603719a0 100644 --- a/qa/smoke-test-rank-eval-with-mustache/build.gradle +++ b/qa/smoke-test-rank-eval-with-mustache/build.gradle @@ -26,3 +26,11 @@ dependencies { testCompile project(path: ':modules:lang-mustache', configuration: 'runtime') } +/* + * One of the integration tests doesn't work with the zip distribution + * and will be fixed later. + * Tracked by https://github.com/elastic/elasticsearch/issues/30628 + */ +if ("zip".equals(integTestCluster.distribution)) { + integTestRunner.enabled = false +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml new file mode 100644 index 0000000000000..9dd54811fabaa --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml @@ -0,0 +1,46 @@ +setup: + - skip: + version: " - 6.4.0" + reason: "moving_fn added in 6.4.0" + +--- +"Bad window": + + - do: + catch: /\[window\] must be a positive, non-zero integer\./ + search: + body: + size: 0 + aggs: + the_histo: + date_histogram: + field: "date" + interval: "1d" + aggs: + the_avg: + avg: + field: "value_field" + the_mov_fn: + moving_fn: + buckets_path: "the_avg" + window: -1 + script: "MovingFunctions.windowMax(values)" + +--- +"Not under date_histo": + + - do: + catch: /\[window\] must be a positive, non-zero integer\./ + search: + body: + size: 0 + aggs: + the_avg: + avg: + field: "value_field" + the_mov_fn: + moving_fn: + buckets_path: "the_avg" + window: -1 + script: "MovingFunctions.windowMax(values)" +
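Both YAML tests above pin the same failure message, so the `window` check evidently runs before the parent-aggregation check. The server-side validation itself is not part of this diff; as an assumption, it amounts to a parse-time guard along these lines (class and method names hypothetical):

```java
// Hypothetical sketch of the parse-time guard behind the YAML assertions above;
// the real check lives in the moving_fn builder, which this diff does not show.
final class MovFnValidation {
    private MovFnValidation() {}

    static void validateWindow(int window) {
        if (window <= 0) {
            throw new IllegalArgumentException("[window] must be a positive, non-zero integer.");
        }
    }
}
```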
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/80_typed_keys.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/80_typed_keys.yml index 841d5cf611bab..19593decb6533 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/80_typed_keys.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/80_typed_keys.yml @@ -195,7 +195,13 @@ setup: --- "Test typed keys parameter for date_histogram aggregation and max_bucket pipeline aggregation": + - skip: + features: warnings + version: " - 6.4.0" + reason: "deprecation added in 6.4.0" - do: + warnings: + - 'The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.' search: typed_keys: true body: diff --git a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java index 0427685b8ef4f..dff14bc8b393b 100644 --- a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java +++ b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java @@ -242,6 +242,35 @@ public static boolean reThrowIfNotNull(@Nullable Throwable e) { return true; } + /** + * If the specified cause is an unrecoverable error, this method will rethrow the cause on a separate thread so that it cannot be + * caught and instead bubbles up to the uncaught exception handler. + * + * @param throwable the throwable to test + */ + public static void dieOnError(Throwable throwable) { + final Optional<Error> maybeError = ExceptionsHelper.maybeError(throwable, logger); + if (maybeError.isPresent()) { + /* + * Here be dragons. We want to rethrow this so that it bubbles up to the uncaught exception handler. Yet, Netty wraps too many + * invocations of user-code in try/catch blocks that swallow all throwables. This means that a rethrow here will not bubble up + * to where we want it to. So, we fork a thread and throw the exception from there where Netty cannot get to it. We do not wrap + * the exception so as to not lose the original cause during exit. + */ + try { + // try to log the current stack trace + final String formatted = ExceptionsHelper.formatStackTrace(Thread.currentThread().getStackTrace()); + logger.error("fatal error\n{}", formatted); + } finally { + new Thread( + () -> { + throw maybeError.get(); + }) + .start(); + } + } + } + /** * Deduplicate the failures by exception message and index. */
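`dieOnError` is only useful if every catch block that might swallow a fatal `Error` routes its `Throwable` through it first. A sketch of an assumed call site (not part of this diff) showing the intended usage: the call is a no-op for ordinary exceptions and only forks the rethrowing thread when an `Error` is found in the cause chain:

```java
import org.elasticsearch.ExceptionsHelper;

public class FatalErrorGuard {
    // Assumed usage pattern: run a task inside a framework (such as Netty)
    // that catches all Throwables, and make sure fatal Errors still reach
    // the uncaught exception handler via the thread dieOnError forks.
    public static void runGuarded(Runnable task) {
        try {
            task.run();
        } catch (Throwable t) {
            ExceptionsHelper.dieOnError(t); // no-op unless an Error lurks in the cause chain
            throw t;                        // ordinary exceptions keep their normal path
        }
    }
}
```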
diff --git a/server/src/main/java/org/elasticsearch/action/TaskOperationFailure.java b/server/src/main/java/org/elasticsearch/action/TaskOperationFailure.java index 885647441d01f..8740c446b068e 100644 --- a/server/src/main/java/org/elasticsearch/action/TaskOperationFailure.java +++ b/server/src/main/java/org/elasticsearch/action/TaskOperationFailure.java @@ -21,17 +21,20 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.ToXContent.Params; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.rest.RestStatus; import java.io.IOException; import static org.elasticsearch.ExceptionsHelper.detailedMessage; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; /** * Information about task operation failures @@ -39,7 +42,10 @@ * The class is final due to serialization limitations */ public final class TaskOperationFailure implements Writeable, ToXContentFragment { - + private static final String TASK_ID = "task_id"; + private static final String NODE_ID = "node_id"; + private static final String STATUS = "status"; + private static final String REASON = "reason"; private final String nodeId; private final long taskId; @@ -48,6 +54,21 @@ public final class TaskOperationFailure implements Writeable, ToXContentFragment private final RestStatus status; + private static final ConstructingObjectParser<TaskOperationFailure, Void> PARSER = + new ConstructingObjectParser<>("task_info", true, constructorObjects -> { + int i = 0; + String nodeId = (String) constructorObjects[i++]; + long taskId = (long) constructorObjects[i++]; + ElasticsearchException reason = (ElasticsearchException) constructorObjects[i]; + return new TaskOperationFailure(nodeId, taskId, reason); + }); + + static { + PARSER.declareString(constructorArg(), new ParseField(NODE_ID)); + PARSER.declareLong(constructorArg(), new ParseField(TASK_ID)); + PARSER.declareObject(constructorArg(), (parser, c) -> ElasticsearchException.fromXContent(parser), new ParseField(REASON)); + } + public TaskOperationFailure(String nodeId, long taskId, Exception e) { this.nodeId = nodeId; this.taskId = taskId; @@ -98,13 +119,17 @@ public String toString() { return "[" + nodeId + "][" + taskId + "] failed, reason [" + getReason() + "]"; } + public static TaskOperationFailure fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field("task_id", getTaskId()); - builder.field("node_id", getNodeId()); - builder.field("status", status.name()); + builder.field(TASK_ID, getTaskId()); + builder.field(NODE_ID, getNodeId()); + builder.field(STATUS, status.name()); if (reason != null) { - builder.field("reason"); + builder.field(REASON); builder.startObject(); ElasticsearchException.generateThrowableXContent(builder, params, reason); builder.endObject(); @@ -112,5 +137,4 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - }
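With `fromXContent` in place, the REST body of a task failure can be parsed back into a `TaskOperationFailure`. A rough usage sketch (the three-argument `createParser` form shown is the 6.x signature; treat the wiring as an approximation):

```java
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;

public class TaskFailureParsingExample {
    public static TaskOperationFailure parse(String json) throws Exception {
        try (XContentParser parser = XContentType.JSON.xContent().createParser(
                NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
            return TaskOperationFailure.fromXContent(parser);
        }
    }
}
```

Note the second constructor argument of the parser above is `true` (ignore unknown fields), so fields such as `status`, which `toXContent` writes but the parser does not declare, are skipped rather than rejected.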
"task_failures"; + private static final String NODE_FAILURES = "node_failures"; private List tasks; @@ -56,11 +65,31 @@ public ListTasksResponse() { } public ListTasksResponse(List tasks, List taskFailures, - List nodeFailures) { + List nodeFailures) { super(taskFailures, nodeFailures); this.tasks = tasks == null ? Collections.emptyList() : Collections.unmodifiableList(new ArrayList<>(tasks)); } + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("list_tasks_response", true, + constructingObjects -> { + int i = 0; + @SuppressWarnings("unchecked") + List tasks = (List) constructingObjects[i++]; + @SuppressWarnings("unchecked") + List tasksFailures = (List) constructingObjects[i++]; + @SuppressWarnings("unchecked") + List nodeFailures = (List) constructingObjects[i]; + return new ListTasksResponse(tasks, tasksFailures, nodeFailures); + }); + + static { + PARSER.declareObjectArray(constructorArg(), TaskInfo.PARSER, new ParseField(TASKS)); + PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> TaskOperationFailure.fromXContent(p), new ParseField(TASK_FAILURES)); + PARSER.declareObjectArray(optionalConstructorArg(), + (parser, c) -> ElasticsearchException.fromXContent(parser), new ParseField(NODE_FAILURES)); + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); @@ -159,7 +188,7 @@ public XContentBuilder toXContentGroupedByNode(XContentBuilder builder, Params p builder.endObject(); } } - builder.startObject("tasks"); + builder.startObject(TASKS); for(TaskInfo task : entry.getValue()) { builder.startObject(task.getTaskId().toString()); task.toXContent(builder, params); @@ -177,7 +206,7 @@ public XContentBuilder toXContentGroupedByNode(XContentBuilder builder, Params p */ public XContentBuilder toXContentGroupedByParents(XContentBuilder builder, Params params) throws IOException { toXContentCommon(builder, params); - builder.startObject("tasks"); + builder.startObject(TASKS); for (TaskGroup group : getTaskGroups()) { builder.field(group.getTaskInfo().getTaskId().toString()); group.toXContent(builder, params); @@ -191,7 +220,7 @@ public XContentBuilder toXContentGroupedByParents(XContentBuilder builder, Param */ public XContentBuilder toXContentGroupedByNone(XContentBuilder builder, Params params) throws IOException { toXContentCommon(builder, params); - builder.startArray("tasks"); + builder.startArray(TASKS); for (TaskInfo taskInfo : getTasks()) { builder.startObject(); taskInfo.toXContent(builder, params); @@ -204,14 +233,14 @@ public XContentBuilder toXContentGroupedByNone(XContentBuilder builder, Params p @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - toXContentGroupedByParents(builder, params); + toXContentGroupedByNone(builder, params); builder.endObject(); return builder; } private void toXContentCommon(XContentBuilder builder, Params params) throws IOException { if (getTaskFailures() != null && getTaskFailures().size() > 0) { - builder.startArray("task_failures"); + builder.startArray(TASK_FAILURES); for (TaskOperationFailure ex : getTaskFailures()){ builder.startObject(); builder.value(ex); @@ -221,8 +250,8 @@ private void toXContentCommon(XContentBuilder builder, Params params) throws IOE } if (getNodeFailures() != null && getNodeFailures().size() > 0) { - builder.startArray("node_failures"); - for (FailedNodeException ex : getNodeFailures()) { + builder.startArray(NODE_FAILURES); + for (ElasticsearchException ex : 
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java index ad81302918eb3..82f0e38572e77 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; @@ -44,7 +45,7 @@ * Registers a repository with given name, type and settings. If the repository with the same name already * exists in the cluster, the new repository will replace the existing repository. */ -public class PutRepositoryRequest extends AcknowledgedRequest<PutRepositoryRequest> { +public class PutRepositoryRequest extends AcknowledgedRequest<PutRepositoryRequest> implements ToXContentObject { private String name; @@ -232,4 +233,19 @@ public void writeTo(StreamOutput out) throws IOException { writeSettingsToStream(settings, out); out.writeBoolean(verify); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("name", name); + builder.field("type", type); + + builder.startObject("settings"); + settings.toXContent(builder, params); + builder.endObject(); + + builder.field("verify", verify); + builder.endObject(); + return builder; + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryResponse.java index c2b45743447f2..e58a1d9d147f9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryResponse.java @@ -22,6 +22,8 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; @@ -30,6 +32,13 @@ */ public class PutRepositoryResponse extends AcknowledgedResponse { + private static final ConstructingObjectParser<PutRepositoryResponse, Void> PARSER = new ConstructingObjectParser<>("put_repository", + true, args -> new PutRepositoryResponse((boolean) args[0])); + + static { + declareAcknowledgedField(PARSER); + } + PutRepositoryResponse() { } @@ -49,4 +58,7 @@ public void writeTo(StreamOutput out) throws IOException { writeAcknowledged(out); } + public static PutRepositoryResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } }
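Since `PutRepositoryRequest` now implements `ToXContentObject`, it can be serialized straight into a request body. A sketch of what the new `toXContent` emits (repository name and settings invented; the exact JSON of the nested `settings` object depends on `Settings.toXContent`):

```java
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;

public class PutRepositoryXContentExample {
    public static String toJson() {
        PutRepositoryRequest request = new PutRepositoryRequest("my_backup");
        request.type("fs");
        request.settings(Settings.builder().put("location", "/mount/backups/my_backup").build());
        request.verify(true);
        // Roughly: {"name":"my_backup","type":"fs","settings":{...},"verify":true}
        return Strings.toString(request);
    }
}
```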
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java index 52fe03f58c28d..a7a5548552be2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java @@ -71,8 +71,9 @@ protected ClusterBlockException checkBlock(CreateSnapshotRequest request, Cluste @Override protected void masterOperation(final CreateSnapshotRequest request, ClusterState state, final ActionListener<CreateSnapshotResponse> listener) { + final String snapshotName = indexNameExpressionResolver.resolveDateMathExpression(request.snapshot()); SnapshotsService.SnapshotRequest snapshotRequest = - new SnapshotsService.SnapshotRequest(request.repository(), request.snapshot(), "create_snapshot [" + request.snapshot() + "]") + new SnapshotsService.SnapshotRequest(request.repository(), snapshotName, "create_snapshot [" + snapshotName + "]") .indices(request.indices()) .indicesOptions(request.indicesOptions()) .partial(request.partial()) @@ -87,7 +88,7 @@ public void onResponse() { @Override public void onSnapshotCompletion(Snapshot snapshot, SnapshotInfo snapshotInfo) { if (snapshot.getRepository().equals(request.repository()) && - snapshot.getSnapshotId().getName().equals(request.snapshot())) { + snapshot.getSnapshotId().getName().equals(snapshotName)) { listener.onResponse(new CreateSnapshotResponse(snapshotInfo)); snapshotsService.removeListener(this); } @@ -96,7 +97,7 @@ public void onSnapshotCompletion(Snapshot snapshot, SnapshotInfo snapshotInfo) { @Override public void onSnapshotFailure(Snapshot snapshot, Exception e) { if (snapshot.getRepository().equals(request.repository()) && - snapshot.getSnapshotId().getName().equals(request.snapshot())) { + snapshot.getSnapshotId().getName().equals(snapshotName)) { listener.onFailure(e); snapshotsService.removeListener(this); }
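The snapshot fix above resolves date math once, up front. Judging from the diff, a request for a name like `<snapshot-{now/d}>` previously ended up stored under the resolved name while the completion listener still compared against the raw expression, so it could never match; now both creation and listener matching use the same resolved string. A small illustration of the resolution step (the resolved value is an example for one specific day):

```java
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;

public class SnapshotNameResolutionExample {
    // resolveDateMathExpression is the same helper the action now calls once
    // and then uses consistently for both creation and listener matching.
    public static String resolve(IndexNameExpressionResolver resolver) {
        String requested = "<snapshot-{now/d}>";
        return resolver.resolveDateMathExpression(requested); // e.g. "snapshot-2018.05.15"
    }
}
```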
diff --git a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java index 64c26d6b94aa5..b284ec87dd42c 100644 --- a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java +++ b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java @@ -19,12 +19,17 @@ package org.elasticsearch.action.support; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.rest.RestRequest; import java.io.IOException; +import java.util.Collection; +import java.util.EnumSet; +import java.util.HashSet; import java.util.Map; +import java.util.Set; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringArrayValue; @@ -35,41 +40,155 @@ */ public class IndicesOptions { - private static final IndicesOptions[] VALUES; + public enum WildcardStates { + OPEN, + CLOSED; - private static final byte IGNORE_UNAVAILABLE = 1; - private static final byte ALLOW_NO_INDICES = 2; - private static final byte EXPAND_WILDCARDS_OPEN = 4; - private static final byte EXPAND_WILDCARDS_CLOSED = 8; - private static final byte FORBID_ALIASES_TO_MULTIPLE_INDICES = 16; - private static final byte FORBID_CLOSED_INDICES = 32; - private static final byte IGNORE_ALIASES = 64; + public static final EnumSet<WildcardStates> NONE = EnumSet.noneOf(WildcardStates.class); - private static final byte STRICT_EXPAND_OPEN = 6; - private static final byte LENIENT_EXPAND_OPEN = 7; - private static final byte STRICT_EXPAND_OPEN_CLOSED = 14; - private static final byte STRICT_EXPAND_OPEN_FORBID_CLOSED = 38; - private static final byte STRICT_SINGLE_INDEX_NO_EXPAND_FORBID_CLOSED = 48; + public static EnumSet<WildcardStates> parseParameter(Object value, EnumSet<WildcardStates> defaultStates) { + if (value == null) { + return defaultStates; + } - static { - short max = 1 << 7; - VALUES = new IndicesOptions[max]; - for (short id = 0; id < max; id++) { - VALUES[id] = new IndicesOptions((byte)id); + Set<WildcardStates> states = new HashSet<>(); + String[] wildcards = nodeStringArrayValue(value); + for (String wildcard : wildcards) { + if ("open".equals(wildcard)) { + states.add(OPEN); + } else if ("closed".equals(wildcard)) { + states.add(CLOSED); + } else if ("none".equals(wildcard)) { + states.clear(); + } else if ("all".equals(wildcard)) { + states.add(OPEN); + states.add(CLOSED); + } else { + throw new IllegalArgumentException("No valid expand wildcard value [" + wildcard + "]"); + } + } + + return states.isEmpty() ? NONE : EnumSet.copyOf(states); } } - private final byte id; + public enum Option { + IGNORE_UNAVAILABLE, + IGNORE_ALIASES, + ALLOW_NO_INDICES, + FORBID_ALIASES_TO_MULTIPLE_INDICES, + FORBID_CLOSED_INDICES; + + public static final EnumSet<Option>